The Gaudi Framework  master (37c0b60a)
BaseTest.py
Go to the documentation of this file.
1 
11 
import errno
import json
import logging
import os
import platform
import re
import signal
import sys
import threading
import time
from datetime import datetime, timedelta
from html import escape as escape_for_html
from subprocess import PIPE, STDOUT, Popen
from tempfile import NamedTemporaryFile, mkdtemp
from unittest import TestCase
26 
27 if sys.version_info < (3, 5):
28  # backport of 'backslashreplace' handling of UnicodeDecodeError
29  # to Python < 3.5
30  from codecs import backslashreplace_errors, register_error
31 
33  if isinstance(exc, UnicodeDecodeError):
34  code = hex(ord(exc.object[exc.start]))
35  return ("\\" + code[1:], exc.start + 1)
36  else:
37  return backslashreplace_errors(exc)
38 
39  register_error("backslashreplace", _new_backslashreplace_errors)
40  del register_error
41  del backslashreplace_errors
42  del _new_backslashreplace_errors
43 
44 SKIP_RETURN_CODE = 77
45 
46 # default of 100MB
47 OUTPUT_LIMIT = int(os.environ.get("GAUDI_TEST_STDOUT_LIMIT", 100 * 1024**2))
48 
49 
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    # characters not allowed in XML 1.0 text: control characters (except
    # tab/newline/carriage-return), surrogates and U+FFFE/U+FFFF
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1f\ud800-\udfff\ufffe\uffff]")

    def quote(match):
        "helper function: replace each bad char with a visible marker"
        # fix: use %02X (zero-padded) instead of %2X, which space-pads
        # code points below 0x10 and produced markers like
        # '[NON-XML-CHAR-0x 1]'
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
65 
66 
def dumpProcs(name):
    """helper to debug GAUDI-1084, dump the list of processes"""
    from getpass import getuser

    # only dump when running inside a CI workspace
    if "WORKSPACE" not in os.environ:
        return
    listing = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE).communicate()[0]
    dump_path = os.path.join(os.environ["WORKSPACE"], name)
    with open(dump_path, "wb") as dump_file:
        dump_file.write(listing)
75 
76 
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).

    @param ppid: PID of the root of the process tree to signal
    @param sig: signal number to send (e.g. signal.SIGTERM)
    """
    log = logging.getLogger("kill_tree")
    ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
    # Note: start in a clean env to avoid a freeze with libasan.so
    # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
    children = map(int, get_children.communicate()[0].split())
    # depth-first: signal the leaves before their parent
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        # the process may have exited in the meantime: not an error
        # (use errno.ESRCH instead of the magic number 3)
        if err.errno != errno.ESRCH:
            raise
        log.debug("no such process %d", ppid)
97 
98 
99 # -------------------------------------------------------------------------#
100 
101 
102 class BaseTest(object):
103  _common_tmpdir = None
104 
    def __init__(self):
        # --- user-configurable test parameters ---
        self.program = ""  # executable to run ($GAUDIEXE or Gaudi.exe if empty)
        self.args = []  # command line arguments for the program
        self.reference = ""  # path of the stdout reference file
        self.error_reference = ""  # path of the stderr reference file
        self.options = ""  # inline job options, written to a temp file by run()
        self.stderr = ""  # expected standard error, compared verbatim if set
        self.timeout = 600  # seconds before the test is killed
        self.exit_code = None  # expected exit code (None -> expect 0)
        self.environment = dict(os.environ)  # environment for the subprocess
        self.signal = None  # expected terminating signal, if any
        self.workdir = os.curdir
        self.use_temp_dir = False
        # Variables not for users
        self.status = None  # 'passed', 'failed' or 'skipped' after run()
        self.name = ""
        self.causes = []  # failure causes collected during validation
        self.result = Result(self)
        self.returnedCode = 0
        self.out = ""  # captured stdout of the test program
        self.err = ""  # captured stderr of the test program
        self.proc = None
        self.stack_trace = None
        self.basedir = os.getcwd()
        self.validate_time = None
        # NOTE(review): run() also reads self.unsupported_platforms and
        # self.validator, which are not set here -- presumably provided by
        # subclasses or by the test driver; confirm against the callers.
131 
    def run(self):
        """
        Execute the test program (with timeout and output-size watchdogs) and
        validate its output.

        Returns a dict of human-readable annotations describing the outcome;
        also sets self.status to 'passed', 'failed' or 'skipped'.
        """
        logging.debug("running test %s", self.name)

        # NOTE(review): the dict is passed as the (unused) 'kind' positional
        # argument of Result, so these keys do not end up in the annotations.
        self.result = Result(
            {
                "CAUSE": None,
                "EXCEPTION": None,
                "RESOURCE": None,
                "TARGET": None,
                "TRACEBACK": None,
                "START_TIME": None,
                "END_TIME": None,
                "TIMEOUT_DETAIL": None,
            }
        )

        if self.options:
            # guess the options language from the imports it uses
            if re.search(
                r"from\s+Gaudi.Configuration\s+import\s+\*|"
                r"from\s+Configurables\s+import",
                self.options,
            ):
                suffix, lang = ".py", "python"
            else:
                suffix, lang = ".opts", "c++"
            self.result["Options"] = (
                '<pre><code class="language-{}">{}</code></pre>'.format(
                    lang, escape_for_html(self.options)
                )
            )
            # write the inline options to a temp file passed as last argument
            optionFile = NamedTemporaryFile(suffix=suffix)
            optionFile.file.write(self.options.encode("utf-8"))
            optionFile.seek(0)
            self.args.append(RationalizePath(optionFile.name))

        platform_id = (
            self.environment.get("BINARY_TAG")
            or self.environment.get("CMTCONFIG")
            or platform.platform()
        )
        # If at least one regex matches we skip the test.
        # NOTE(review): self.unsupported_platforms is not set in __init__ --
        # presumably provided by subclasses; confirm.
        skip_test = bool(
            [
                None
                for prex in self.unsupported_platforms
                if re.search(prex, platform_id)
            ]
        )

        if not skip_test:
            # handle working/temporary directory options
            workdir = self.workdir
            if self.use_temp_dir:
                if self._common_tmpdir:
                    workdir = self._common_tmpdir
                else:
                    workdir = mkdtemp()

            # prepare the command to execute
            prog = ""
            if self.program != "":
                prog = self.program
            elif "GAUDIEXE" in self.environment:
                prog = self.environment["GAUDIEXE"]
            else:
                prog = "Gaudi.exe"

            prog_ext = os.path.splitext(prog)[1]
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = list(map(RationalizePath, self.args))

            if prog_ext == ".py":
                params = ["python3", RationalizePath(prog)] + args
            else:
                params = [RationalizePath(prog)] + args

            # we need to switch directory because the validator expects to run
            # in the same dir as the program
            os.chdir(workdir)

            # capture stdout/stderr in named temp files so their size can be
            # polled while the process runs
            tmp_streams = {
                "stdout": NamedTemporaryFile(),
                "stderr": NamedTemporaryFile(),
            }

            # launching test in a different thread to handle timeout exception
            def target():
                logging.debug("executing %r in %s", params, workdir)
                self.proc = Popen(
                    params,
                    stdout=tmp_streams["stdout"],
                    stderr=tmp_streams["stderr"],
                    env=self.environment,
                )
                logging.debug("(pid: %d)", self.proc.pid)
                self.proc.communicate()
                tmp_streams["stdout"].seek(0)
                self.out = (
                    tmp_streams["stdout"]
                    .read()
                    .decode("utf-8", errors="backslashreplace")
                )
                tmp_streams["stderr"].seek(0)
                self.err = (
                    tmp_streams["stderr"]
                    .read()
                    .decode("utf-8", errors="backslashreplace")
                )

            thread = threading.Thread(target=target)
            thread.start()
            # checking for timeout and stdout/err cutoff
            when_to_stop = datetime.now() + timedelta(seconds=self.timeout)
            too_big_stream = None
            while (
                datetime.now() < when_to_stop
                and thread.is_alive()
                and not too_big_stream
            ):
                # we check stdout and stderr size a few times per second
                thread.join(0.1)
                # if we are done, there is no need to check output size
                if thread.is_alive():
                    for stream in tmp_streams:
                        if os.path.getsize(tmp_streams[stream].name) > OUTPUT_LIMIT:
                            too_big_stream = stream

            if thread.is_alive():
                # still running: either timed out or produced too much output
                if not too_big_stream:
                    logging.debug(
                        "time out in test %s (pid %d)", self.name, self.proc.pid
                    )
                    # get the stack trace of the stuck process
                    cmd = [
                        "gdb",
                        "--pid",
                        str(self.proc.pid),
                        "--batch",
                        "--eval-command=thread apply all backtrace",
                    ]
                    gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                    self.stack_trace = gdb.communicate()[0].decode(
                        "utf-8", errors="backslashreplace"
                    )
                    self.causes.append("timeout")
                else:
                    logging.debug(
                        "too big %s detected (pid %d)", too_big_stream, self.proc.pid
                    )
                    self.result[f"{too_big_stream} limit"] = str(OUTPUT_LIMIT)
                    self.result[f"{too_big_stream} size"] = str(
                        os.path.getsize(tmp_streams[too_big_stream].name)
                    )
                    self.causes.append(f"too big {too_big_stream}")

                # terminate the whole process tree; escalate to SIGKILL if it
                # does not die within a minute
                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)

            else:
                self.returnedCode = self.proc.returncode
                if self.returnedCode != SKIP_RETURN_CODE:
                    logging.debug(
                        f"completed test {self.name} with returncode = {self.returnedCode}"
                    )
                    logging.debug("validating test...")
                    val_start_time = time.perf_counter()
                    self.result, self.causes = self.ValidateOutput(
                        stdout=self.out, stderr=self.err, result=self.result
                    )
                    self.validate_time = round(time.perf_counter() - val_start_time, 2)
                else:
                    # SKIP_RETURN_CODE (77) means the program itself asked
                    # for the test to be skipped
                    logging.debug(f"skipped test {self.name}")
                    self.status = "skipped"

            # remove the temporary directory if we created it
            # NOTE(review): shutil is imported further down in this module;
            # available here because run() executes after import time
            if self.use_temp_dir and not self._common_tmpdir:
                shutil.rmtree(workdir, True)

            os.chdir(self.basedir)

            if self.status != "skipped":
                # handle application exit code
                if self.signal is not None:
                    # expected to die from a specific signal
                    if int(self.returnedCode) != -int(self.signal):
                        self.causes.append("exit code")

                elif self.exit_code is not None:
                    if int(self.returnedCode) != int(self.exit_code):
                        self.causes.append("exit code")

                elif self.returnedCode != 0:
                    self.causes.append("exit code")

                if self.causes:
                    self.status = "failed"
                else:
                    self.status = "passed"

        else:
            self.status = "skipped"

        logging.debug("%s: %s", self.name, self.status)
        # map human-readable annotation names to instance attributes
        field_mapping = {
            "Exit Code": "returnedCode",
            "stderr": "err",
            "Arguments": "args",
            "Runtime Environment": "environment",
            "Status": "status",
            "stdout": "out",
            "Program Name": "program",
            "Name": "name",
            "Validator": "validator",
            "Validation execution time": "validate_time",
            "Output Reference File": "reference",
            "Error Reference File": "error_reference",
            "Causes": "causes",
            # 'Validator Result': 'result.annotations',
            "Unsupported Platforms": "unsupported_platforms",
            "Stack Trace": "stack_trace",
        }
        # only report fields with a non-empty value
        resultDict = [
            (key, getattr(self, attr))
            for key, attr in field_mapping.items()
            if getattr(self, attr)
        ]
        resultDict.append(
            (
                "Working Directory",
                RationalizePath(os.path.join(os.getcwd(), self.workdir)),
            )
        )
        # print(dict(resultDict).keys())
        resultDict.extend(self.result.annotations.items())
        # print(self.result.annotations.keys())
        resultDict = dict(resultDict)

        # Special cases
        if "Validator" in resultDict:
            resultDict["Validator"] = (
                '<pre><code class="language-{}">{}</code></pre>'.format(
                    "python", escape_for_html(resultDict["Validator"])
                )
            )
        return resultDict
383 
384  # -------------------------------------------------#
385  # ----------------Validating tool------------------#
386  # -------------------------------------------------#
387 
388  def ValidateOutput(self, stdout, stderr, result):
389  if not self.stderr:
390  self.validateWithReference(stdout, stderr, result, self.causes)
391  elif stderr.strip() != self.stderr.strip():
392  self.causes.append("standard error")
393  return result, self.causes
394 
396  self,
397  reference=None,
398  stdout=None,
399  result=None,
400  causes=None,
401  signature_offset=0,
402  signature=None,
403  id=None,
404  ):
405  """
406  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
407  """
408 
409  if reference is None:
410  reference = self.reference
411  if stdout is None:
412  stdout = self.out
413  if result is None:
414  result = self.result
415  if causes is None:
416  causes = self.causes
417 
418  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
419  if not reflines:
420  raise RuntimeError("Empty (or null) reference")
421  # the same on standard output
422  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
423 
424  res_field = "GaudiTest.RefBlock"
425  if id:
426  res_field += "_%s" % id
427 
428  if signature is None:
429  if signature_offset < 0:
430  signature_offset = len(reference) + signature_offset
431  signature = reflines[signature_offset]
432  # find the reference block in the output file
433  try:
434  pos = outlines.index(signature)
435  outlines = outlines[
436  pos - signature_offset : pos + len(reflines) - signature_offset
437  ]
438  if reflines != outlines:
439  msg = "standard output"
440  # I do not want 2 messages in causes if the function is called
441  # twice
442  if msg not in causes:
443  causes.append(msg)
444  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
445  except ValueError:
446  causes.append("missing signature")
447  result[res_field + ".signature"] = result.Quote(signature)
448  if len(reflines) > 1 or signature != reflines[0]:
449  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
450  return causes
451 
453  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
454  ):
455  """
456  Count the number of messages with required severity (by default ERROR and FATAL)
457  and check if their numbers match the expected ones (0 by default).
458  The dictionary "expected" can be used to tune the number of errors and fatals
459  allowed, or to limit the number of expected warnings etc.
460  """
461 
462  if stdout is None:
463  stdout = self.out
464  if result is None:
465  result = self.result
466  if causes is None:
467  causes = self.causes
468 
469  # prepare the dictionary to record the extracted lines
470  errors = {}
471  for sev in expected:
472  errors[sev] = []
473 
474  outlines = stdout.splitlines()
475  from math import log10
476 
477  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
478 
479  linecount = 0
480  for l in outlines:
481  linecount += 1
482  words = l.split()
483  if len(words) >= 2 and words[1] in errors:
484  errors[words[1]].append(fmt % (linecount, l.rstrip()))
485 
486  for e in errors:
487  if len(errors[e]) != expected[e]:
488  causes.append("%s(%d)" % (e, len(errors[e])))
489  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
490  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
491  str(expected[e])
492  )
493 
494  return causes
495 
497  self,
498  stdout=None,
499  result=None,
500  causes=None,
501  trees_dict=None,
502  ignore=r"Basket|.*size|Compression",
503  ):
504  """
505  Compare the TTree summaries in stdout with the ones in trees_dict or in
506  the reference file. By default ignore the size, compression and basket
507  fields.
508  The presence of TTree summaries when none is expected is not a failure.
509  """
510  if stdout is None:
511  stdout = self.out
512  if result is None:
513  result = self.result
514  if causes is None:
515  causes = self.causes
516  if trees_dict is None:
517  lreference = self._expandReferenceFileName(self.reference)
518  # call the validator if the file exists
519  if lreference and os.path.isfile(lreference):
520  trees_dict = findTTreeSummaries(open(lreference).read())
521  else:
522  trees_dict = {}
523 
524  from pprint import PrettyPrinter
525 
526  pp = PrettyPrinter()
527  if trees_dict:
528  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
529  if ignore:
530  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
531 
532  trees = findTTreeSummaries(stdout)
533  failed = cmpTreesDicts(trees_dict, trees, ignore)
534  if failed:
535  causes.append("trees summaries")
536  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
537  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
538  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
539 
540  return causes
541 
543  self, stdout=None, result=None, causes=None, dict=None, ignore=None
544  ):
545  """
546  Compare the TTree summaries in stdout with the ones in trees_dict or in
547  the reference file. By default ignore the size, compression and basket
548  fields.
549  The presence of TTree summaries when none is expected is not a failure.
550  """
551  if stdout is None:
552  stdout = self.out
553  if result is None:
554  result = self.result
555  if causes is None:
556  causes = self.causes
557 
558  if dict is None:
559  lreference = self._expandReferenceFileName(self.reference)
560  # call the validator if the file exists
561  if lreference and os.path.isfile(lreference):
562  dict = findHistosSummaries(open(lreference).read())
563  else:
564  dict = {}
565 
566  from pprint import PrettyPrinter
567 
568  pp = PrettyPrinter()
569  if dict:
570  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
571  if ignore:
572  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
573 
574  histos = findHistosSummaries(stdout)
575  failed = cmpTreesDicts(dict, histos, ignore)
576  if failed:
577  causes.append("histos summaries")
578  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
579  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
580  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
581 
582  return causes
583 
585  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
586  ):
587  """
588  Default validation acti*on: compare standard output and error to the
589  reference files.
590  """
591 
592  if stdout is None:
593  stdout = self.out
594  if stderr is None:
595  stderr = self.err
596  if result is None:
597  result = self.result
598  if causes is None:
599  causes = self.causes
600 
601  # set the default output preprocessor
602  if preproc is None:
603  preproc = normalizeTestSuite
604  # check standard output
605  lreference = self._expandReferenceFileName(self.reference)
606  # call the validator if the file exists
607  if lreference and os.path.isfile(lreference):
608  causes += ReferenceFileValidator(
609  lreference, "standard output", "Output Diff", preproc=preproc
610  )(stdout, result)
611  elif lreference:
612  causes += ["missing reference file"]
613  # Compare TTree summaries
614  causes = self.CheckTTreesSummaries(stdout, result, causes)
615  causes = self.CheckHistosSummaries(stdout, result, causes)
616  if causes and lreference: # Write a new reference file for stdout
617  try:
618  cnt = 0
619  newrefname = ".".join([lreference, "new"])
620  while os.path.exists(newrefname):
621  cnt += 1
622  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
623  newref = open(newrefname, "w")
624  # sanitize newlines
625  for l in stdout.splitlines():
626  newref.write(l.rstrip() + "\n")
627  del newref # flush and close
628  result["New Output Reference File"] = os.path.relpath(
629  newrefname, self.basedir
630  )
631  except IOError:
632  # Ignore IO errors when trying to update reference files
633  # because we may be in a read-only filesystem
634  pass
635 
636  # check standard error
637  lreference = self._expandReferenceFileName(self.error_reference)
638  # call the validator if we have a file to use
639  if lreference:
640  if os.path.isfile(lreference):
641  newcauses = ReferenceFileValidator(
642  lreference, "standard error", "Error Diff", preproc=preproc
643  )(stderr, result)
644  else:
645  newcauses = ["missing error reference file"]
646  causes += newcauses
647  if newcauses and lreference: # Write a new reference file for stdedd
648  cnt = 0
649  newrefname = ".".join([lreference, "new"])
650  while os.path.exists(newrefname):
651  cnt += 1
652  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
653  newref = open(newrefname, "w")
654  # sanitize newlines
655  for l in stderr.splitlines():
656  newref.write(l.rstrip() + "\n")
657  del newref # flush and close
658  result["New Error Reference File"] = os.path.relpath(
659  newrefname, self.basedir
660  )
661  else:
662  causes += BasicOutputValidator(
663  lreference, "standard error", "ExecTest.expected_stderr"
664  )(stderr, result)
665  return causes
666 
668  self,
669  output_file,
670  reference_file,
671  result=None,
672  causes=None,
673  detailed=True,
674  ):
675  """
676  JSON validation action: compare json file to reference file
677  """
678 
679  if result is None:
680  result = self.result
681  if causes is None:
682  causes = self.causes
683 
684  if not os.path.isfile(output_file):
685  causes.append(f"output file {output_file} does not exist")
686  return causes
687 
688  try:
689  with open(output_file) as f:
690  output = json.load(f)
691  except json.JSONDecodeError as err:
692  causes.append("json parser error")
693  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
694  return causes
695 
696  lreference = self._expandReferenceFileName(reference_file)
697  if not lreference:
698  causes.append("reference file not set")
699  elif not os.path.isfile(lreference):
700  causes.append("reference file does not exist")
701  else:
702  causes += JSONOutputValidator()(lreference, output, result, detailed)
703  if causes and lreference: # Write a new reference file for output
704  try:
705  cnt = 0
706  newrefname = ".".join([lreference, "new"])
707  while os.path.exists(newrefname):
708  cnt += 1
709  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
710  with open(newrefname, "w") as newref:
711  json.dump(output, newref, indent=4)
712  result["New JSON Output Reference File"] = os.path.relpath(
713  newrefname, self.basedir
714  )
715  except IOError:
716  # Ignore IO errors when trying to update reference files
717  # because we may be in a read-only filesystem
718  pass
719  return causes
720 
    def _expandReferenceFileName(self, reffile):
        """
        Resolve a reference file name to the most platform-specific existing
        variant, or return the expanded name unchanged if none exists.
        Returns '' when no reference file was configured.
        """
        # if no file is passed, do nothing
        if not reffile:
            return ""

        # function to split an extension in constituents parts
        import re

        def platformSplit(p):
            # a platform id like 'x86_64-slc6-gcc48-opt' becomes a set of tags
            return set(re.split(r"[-+]", p))

        reference = os.path.normpath(
            os.path.join(self.basedir, os.path.expandvars(reffile))
        )

        # old-style platform-specific reference name
        # NOTE(review): GetPlatform is defined elsewhere in this module
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:  # look for new-style platform specific reference files:
            # get all the files whose name start with the reference filename
            dirname, basename = os.path.split(reference)
            if not dirname:
                dirname = "."
            head = basename + "."
            head_len = len(head)
            platform = platformSplit(GetPlatform(self))
            if "do0" in platform:
                # 'do0' builds behave like debug builds for references
                platform.add("dbg")
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:  # take the one with highest matching
                # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
                # has to use ref.x86_64-gcc43 or ref.slc5-dbg
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
762 
763 
764 # ======= GAUDI TOOLS =======
765 
766 import difflib
767 import shutil
768 
769 try:
770  from GaudiKernel import ROOT6WorkAroundEnabled
771 except ImportError:
772 
774  # dummy implementation
775  return False
776 
777 
778 # --------------------------------- TOOLS ---------------------------------#
779 
780 
782  """
783  Function used to normalize the used path
784  """
785  p = os.path.expandvars(p)
786 
787  # handle the special case "path/to/file:some_suffix"
788  suffix = ""
789  if ":" in p:
790  p, suffix = p.rsplit(":", 1)
791  suffix = f":{suffix}"
792 
793  if os.path.exists(p):
794  p = os.path.realpath(p)
795  return p + suffix
796 
797 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it.  An application is looked for with or without the '.exe'
    suffix.  If the executable cannot be found, None is returned.

    @param executable: name or (absolute) path of the program to look up
    @return: path to the executable, or None if not found
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                if os.path.isfile(executable[:-4]):
                    return executable[:-4]
            else:
                # absolute path does not exist: retry the bare name in $PATH
                executable = os.path.split(executable)[1]
        else:
            return executable
    # fix: os.environ.get("PATH") returns None when PATH is unset, which
    # would make .split() raise TypeError; default to an empty string
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        if executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            return fullpath[:-4]
    return None
820 
821 
822 # -------------------------------------------------------------------------#
823 # ----------------------------- Result Classe -----------------------------#
824 # -------------------------------------------------------------------------#
825 
826 
class Result:
    """
    Container for the outcome of a test: a free-form mapping of string
    annotations plus a few outcome constants.
    """

    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"

    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations=None):
        """
        @param kind, id, outcome: unused, kept for backward compatibility
        @param annotations: optional initial annotations (always copied)
        """
        # fix: avoid a mutable default argument; copy whatever we get so the
        # instance never aliases the caller's dict (same behavior as the old
        # annotations.copy())
        self.annotations = dict(annotations) if annotations else {}

    def __getitem__(self, key):
        # annotations are a str -> str mapping
        assert isinstance(key, str)
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert isinstance(key, str)
        assert isinstance(value, str), "{!r} is not a string".format(value)
        self.annotations[key] = value

    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return "<pre>{}</pre>".format(escape_for_html(text))
858 
859 
860 # -------------------------------------------------------------------------#
861 # --------------------------- Validator Classes ---------------------------#
862 # -------------------------------------------------------------------------#
863 
864 # Basic implementation of an option validator for Gaudi test. This
865 # implementation is based on the standard (LCG) validation functions used
866 # in QMTest.
867 
868 
    def __init__(self, ref, cause, result_key):
        # ref: the expected (reference) text
        # cause: failure-cause label appended to the causes list on mismatch
        # result_key: annotation key under which the reference text is stored
        self.ref = ref
        self.cause = cause
        self.result_key = result_key
874 
    def __call__(self, out, result):
        """Validate the output of the program against the stored reference.

        'out' -- A string containing the data written to the stream under
        test (stdout or stderr).
        'result' -- A 'Result' object; annotated with the reference text
        when the comparison fails.
        returns -- A list of strings giving causes of failure (empty on
        success)."""

        causes = []
        # Check the output
        if not self.__CompareText(out, self.ref):
            causes.append(self.cause)
            result[self.result_key] = result.Quote(self.ref)

        return causes
892 
    def __CompareText(self, s1, s2):
        """Compare 's1' and 's2', ignoring line endings.
        's1' -- A string.
        's2' -- A string.
        returns -- True if 's1' and 's2' are the same, ignoring
        differences in line endings."""
        if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
            # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
            # can fix them
            to_ignore = re.compile(
                r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
            )

            def keep_line(l):
                # drop the known-noisy ROOT warning lines before comparing
                return not to_ignore.match(l)

            return list(filter(keep_line, s1.splitlines())) == list(
                filter(keep_line, s2.splitlines())
            )
        else:
            return s1.splitlines() == s2.splitlines()
914 
915 
916 # ------------------------ Preprocessor elements ------------------------#
918  """Base class for a callable that takes a file and returns a modified
919  version of it."""
920 
    def __processLine__(self, line):
        # identity transform: subclasses override this to rewrite a line or
        # drop it by returning None
        return line
923 
924  def __processFile__(self, lines):
925  output = []
926  for l in lines:
927  l = self.__processLine__(l)
928  if l:
929  output.append(l)
930  return output
931 
932  def __call__(self, input):
933  if not isinstance(input, str):
934  lines = input
935  mergeback = False
936  else:
937  lines = input.splitlines()
938  mergeback = True
939  output = self.__processFile__(lines)
940  if mergeback:
941  output = "\n".join(output)
942  return output
943 
    def __add__(self, rhs):
        # chaining two preprocessors yields a sequence applied left-to-right
        return FilePreprocessorSequence([self, rhs])
946 
947 
949  def __init__(self, members=[]):
950  self.members = members
951 
    def __add__(self, rhs):
        # appending to a sequence keeps the chain flat
        return FilePreprocessorSequence(self.members + [rhs])
954 
955  def __call__(self, input):
956  output = input
957  for pp in self.members:
958  output = pp(output)
959  return output
960 
961 
963  def __init__(self, strings=[], regexps=[]):
964  import re
965 
966  self.strings = strings
967  self.regexps = list(map(re.compile, regexps))
968 
969  def __processLine__(self, line):
970  for s in self.strings:
971  if line.find(s) >= 0:
972  return None
973  for r in self.regexps:
974  if r.search(line):
975  return None
976  return line
977 
978 
    def __init__(self, start, end):
        # skip the lines between a line containing 'start' and the next line
        # containing 'end' (the start line is dropped, the end line is kept)
        self.start = start
        self.end = end
        self._skipping = False  # True while inside a skipped block
984 
    def __processLine__(self, line):
        if self.start in line:
            self._skipping = True
            return None  # the start-marker line itself is dropped
        elif self.end in line:
            self._skipping = False  # the end-marker line is kept
        elif self._skipping:
            return None
        return line
994 
995 
    def __init__(self, orig, repl="", when=None):
        # replace matches of the 'orig' regexp with 'repl', optionally only
        # on lines matching the 'when' regexp
        if when:
            when = re.compile(when)
        self._operations = [(when, re.compile(orig), repl)]
1001 
    def __add__(self, rhs):
        # merging two replacers keeps a flat list of operations (cheaper than
        # wrapping both in a generic FilePreprocessorSequence)
        if isinstance(rhs, RegexpReplacer):
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res
1009 
1010  def __processLine__(self, line):
1011  for w, o, r in self._operations:
1012  if w is None or w.search(line):
1013  line = o.sub(r, line)
1014  return line
1015 
1016 
# Common preprocessors
# mask memory addresses, which change from run to run
maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
# normalize "HH:MM:SS YYYY-MM-DD"-like timestamps to a fixed epoch date
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01",
)
# force a single trailing newline per line (dunder names are not mangled,
# so the instance attribute overrides the method)
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"

skipEmptyLines = FilePreprocessor()
# FIXME: that's ugly
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
1029 
1030 # Special preprocessor sorting the list of strings (whitespace separated)
1031 # that follow a signature on a single line
1032 
1033 
    def __init__(self, signature):
        # sort the whitespace-separated tokens that follow 'signature' on a line
        self.signature = signature
        self.siglen = len(signature)  # cached length used to split the line
1038 
1039  def __processLine__(self, line):
1040  pos = line.find(self.signature)
1041  if pos >= 0:
1042  line = line[: (pos + self.siglen)]
1043  lst = line[(pos + self.siglen) :].split()
1044  lst.sort()
1045  line += " ".join(lst)
1046  return line
1047 
1048 
1050  """
1051  Sort group of lines matching a regular expression
1052  """
1053 
    def __init__(self, exp):
        # accept either a pre-compiled pattern (anything with .match) or a
        # regexp string
        self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1056 
1057  def __processFile__(self, lines):
1058  match = self.exp.match
1059  output = []
1060  group = []
1061  for l in lines:
1062  if match(l):
1063  group.append(l)
1064  else:
1065  if group:
1066  group.sort()
1067  output.extend(group)
1068  group = []
1069  output.append(l)
1070  return output
1071 
1072 
# Preprocessors for GaudiTestSuite
normalizeTestSuite = maskPointers + normalizeDate
# Each entry below is (when, orig, repl): apply the regexp substitution
# 'orig' -> 'repl', restricted to lines matching 'when' (None = every line).
for w, o, r in [
    ("TIMER", r"\s+[+-]?[0-9]+[0-9.e+-]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"file \1",
    ),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # Absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # Ignore count of declared properties (anyway they are all printed)
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property \['Name': Value\]", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
    # Output line changes in Gaudi v38r3
    (
        "Added successfully Conversion service:",
        "Added successfully Conversion service:",
        "Added successfully Conversion service ",
    ),
]:
    normalizeTestSuite += RegexpReplacer(o, r, w)
1118 
# Skip lines that are uninteresting or version/platform dependent, so that
# they never enter the reference comparison.  Plain strings are matched as
# substrings, the second list as regular expressions.
lineSkipper = LineSkipper(
    [
        "//GP:",
        "JobOptionsSvc INFO # ",
        "JobOptionsSvc WARNING # ",
        "Time User",
        "Welcome to",
        "This machine has a speed",
        "running on",
        "ToolSvc.Sequenc... INFO",
        "DataListenerSvc INFO XML written to file:",
        "[INFO]",
        "[WARNING]",
        "DEBUG No writable file catalog found which contains FID:",
        "DEBUG Service base class initialized successfully",
        # changed between v20 and v21
        "DEBUG Incident timing:",
        # introduced with patch #3487
        # changed the level of the message from INFO to
        # DEBUG
        "INFO 'CnvServices':[",
        # message removed because could be printed in constructor
        "DEBUG 'CnvServices':[",
        # The signal handler complains about SIGXCPU not
        # defined on some platforms
        "SIGXCPU",
        # Message removed with redesign of JobOptionsSvc
        "ServiceLocatorHelper::service: found service JobOptionsSvc",
        # Ignore warnings for properties case mismatch
        "mismatching case for property name:",
        # Message demoted to DEBUG in gaudi/Gaudi!992
        "Histograms saving not required.",
        # Message added in gaudi/Gaudi!577
        "Properties are dumped into",
        # Messages changed in gaudi/Gaudi!1426
        "WARNING no ROOT output file name",
        "INFO Writing ROOT histograms to:",
        "INFO Completed update of ROOT histograms in:",
        # absorb changes in data dependencies reports (https://gitlab.cern.ch/gaudi/Gaudi/-/merge_requests/1348)
        "Data Deps for ",
        "data dependencies:",
    ],
    regexps=[
        r"^JobOptionsSvc INFO *$",
        r"^# ",  # Ignore python comments
        # skip the message reporting the version of the root file
        r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
        r"File '.*.xml' does not exist",
        r"INFO Refer to dataset .* by its file ID:",
        r"INFO Referring to dataset .* by its file ID:",
        r"INFO Disconnect from dataset",
        r"INFO Disconnected from dataset",
        r"INFO Disconnected data IO:",
        r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
        # Ignore StatusCodeSvc related messages
        r".*StatusCodeSvc.*",
        r".*StatusCodeCheck.*",
        r"Num\s*\|\s*Function\s*\|\s*Source Library",
        r"^[-+]*\s*$",
        # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
        r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
        # Hide unchecked StatusCodes from dictionaries
        r"^ +[0-9]+ \|.*ROOT",
        r"^ +[0-9]+ \|.*\|.*Dict",
        # Hide EventLoopMgr total timing report
        r"EventLoopMgr.*---> Loop Finished",
        r"HiveSlimEventLo.*---> Loop Finished",
        # Remove ROOT TTree summary table, which changes from one version to the
        # other
        r"^\*.*\*$",
        # Remove Histos Summaries
        r"SUCCESS\s*Booked \d+ Histogram\(s\)",
        r"^ \|",
        r"^ ID=",
        # Ignore added/removed properties
        r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
        r"Property(.*)'Audit(Begin|End)Run':",
        # these were missing in tools
        r"Property(.*)'AuditRe(start|initialize)':",
        r"Property(.*)'Blocking':",
        # removed with gaudi/Gaudi!273
        r"Property(.*)'ErrorCount(er)?':",
        # added with gaudi/Gaudi!306
        r"Property(.*)'Sequential':",
        # added with gaudi/Gaudi!314
        r"Property(.*)'FilterCircularDependencies':",
        # removed with gaudi/Gaudi!316
        r"Property(.*)'IsClonable':",
        # ignore uninteresting/obsolete messages
        r"Property update for OutputLevel : new value =",
        r"EventLoopMgr\s*DEBUG Creating OutputStream",
        r".*StalledEventMonitoring.*",
    ],
)
1213 
if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
    # fix them
    lineSkipper += LineSkipper(
        regexps=[
            r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
        ]
    )

# Full normalization chain: skip noise lines, apply regexp replacements,
# drop empty lines, normalize line endings, then sort the lists whose order
# is not reproducible between runs.
normalizeTestSuite = (
    lineSkipper
    + normalizeTestSuite
    + skipEmptyLines
    + normalizeEOL
    + LineSorter("Services to release : ")
    + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
)
# for backward compatibility
normalizeExamples = normalizeTestSuite
1233 
1234 # --------------------- Validation functions/classes ---------------------#
1235 
1236 
1238  def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite):
1239  self.reffile = os.path.expandvars(reffile)
1240  self.cause = cause
1241  self.result_key = result_key
1242  self.preproc = preproc
1243 
1244  def __call__(self, stdout, result):
1245  causes = []
1246  if os.path.isfile(self.reffile):
1247  orig = open(self.reffile).readlines()
1248  if self.preproc:
1249  orig = self.preproc(orig)
1250  result[self.result_key + ".preproc.orig"] = result.Quote(
1251  "\n".join(map(str.strip, orig))
1252  )
1253  else:
1254  orig = []
1255  new = stdout.splitlines()
1256  if self.preproc:
1257  new = self.preproc(new)
1258 
1259  # Note: we have to make sure that we do not have `\n` in the comparison
1260  filterdiffs = list(
1261  difflib.unified_diff(
1262  [l.rstrip() for l in orig],
1263  [l.rstrip() for l in new],
1264  n=1,
1265  fromfile="Reference file",
1266  tofile="Actual output",
1267  lineterm="",
1268  )
1269  )
1270  if filterdiffs:
1271  result[self.result_key] = result.Quote("\n".join(filterdiffs))
1272  result[self.result_key + ".preproc.new"] = result.Quote(
1273  "\n".join(map(str.strip, new))
1274  )
1275  causes.append(self.cause)
1276  return causes
1277 
1278 
    """
    Scan stdout to find ROOT TTree summaries and digest them.
    """
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    # digested summaries, keyed by tree name
    trees = {}

    i = 0
    while i < nlines:  # loop over the output
        # look for the next line made only of '*' (start of a summary table)
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            # _parseTTreeSummary returns the digested table and the index of
            # the first line after it (so the outer loop makes progress)
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
1299 
1300 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found.
    """
    # optionally drop reference keys matching the 'ignore' pattern
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    for key in keys:
        if key not in to_check:
            # a missing key is a failure; record the absence in to_check
            to_check[key] = None
            return [key]
        ref_value = reference[key]
        chk_value = to_check[key]
        if isinstance(ref_value, dict) and isinstance(chk_value, dict):
            # both sides are dictionaries: recurse and prepend this key
            # to the failing path, if any
            tail = cmpTreesDicts(ref_value, chk_value, ignore)
            if tail:
                return [key] + tail
        elif chk_value != ref_value:
            return [key]
    # no difference found among the (non-ignored) reference keys
    return []
1332 
1333 
def getCmpFailingValues(reference, to_check, fail_path):
    """Walk 'fail_path' down both dictionaries and return the path together
    with the reference and checked values at the point of divergence."""
    r, c = reference, to_check
    for key in fail_path:
        r = r.get(key, None)
        c = c.get(key, None)
        if r is None or c is None:
            # one of the dictionaries is not deep enough
            break
    return (fail_path, r, c)
1343 
1344 
# signature of the print-out of the histograms
# captures: (1) message source prefix, (2) number of booked histograms,
# (3) the "key=value" counters that follow
h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1347 
1348 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested information and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        # split a table row "*col0 : col1 : col2*" into at most 3 stripped fields
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        # digest one table block (tree header or branch) into a dictionary
        r = {}
        delta_i = 0
        cols = splitcols(ll[0])

        if len(ll) == 3:
            # default one line name/title
            r["Name"], r["Title"] = cols[1:]
        elif len(ll) == 4:
            # in case title is moved to next line due to too long name
            delta_i = 1
            r["Name"] = cols[1]
            r["Title"] = ll[1].strip("*\n").split("|")[1].strip()
        else:
            assert False

        cols = splitcols(ll[1 + delta_i])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # tree kept in memory only: no file size to report
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2 + delta_i])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])

        return r

    def nextblock(lines, i):
        # return the index of the first line of the next block, i.e. the next
        # separator row made of '.' (branch) or '*' (table edge)
        delta_i = 1
        dots = re.compile(r"^\.+$")
        stars = re.compile(r"^\*+$")
        count = len(lines)
        while (
            i + delta_i < count
            and not dots.match(lines[i + delta_i][1:-1])
            and not stars.match(lines[i + delta_i])
        ):
            delta_i += 1
        return i + delta_i

    if i < (count - 3) and lines[i].startswith("*Tree"):
        i_nextblock = nextblock(lines, i)
        result = parseblock(lines[i:i_nextblock])
        result["Branches"] = {}
        i = i_nextblock + 1
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            i_nextblock = nextblock(lines, i)
            if i_nextblock >= count:
                break
            branch = parseblock(lines[i:i_nextblock])
            result["Branches"][branch["Name"]] = branch
            i = i_nextblock + 1

    return (result, i)
1428 
1429 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.
    Returns the position of the first line after the summary block.
    """
    global h_count_re
    # header of a per-directory table, e.g.
    #   'SUCCESS 1D histograms in directory "dirname"'
    h_table_head = re.compile(
        r'(?:INFO|SUCCESS)\s+(1D|2D|3D|1D profile|2D profile|3d profile) histograms in directory\s+"(\w*)"'
    )
    # compact one-line histogram summary: ID=<id> "<title>" <stats>
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]*)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    # the trailing "key=value" counters of the header line
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            # shorten "1D profile" -> "1DProf" etc.
            t = t.replace(" profile", "Prof")
            pos += 1
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # compact format: one histogram per line
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1496 
1497 
    """
    Scan stdout to find histogram summaries (booked-histogram report lines)
    and digest them.
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}
    global h_count_re

    pos = 0
    while pos < nlines:
        summ = {}
        # find first line of block:
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            # parseHistosSummary returns the digested block and the position
            # of the first line after it
            summ, pos = parseHistosSummary(outlines, pos)
            summaries.update(summ)
    return summaries
1519 
1520 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH,
    falling back to a dummy tag derived from ENV_CMAKE_BUILD_TYPE.
    """
    # the first defined variable wins, in this priority order
    for variable in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if variable in os.environ:
            return os.environ[variable]
    build_type = os.environ.get("ENV_CMAKE_BUILD_TYPE", "")
    if build_type in ("Debug", "FastDebug", "Developer"):
        return "dummy-dbg"
    if build_type in ("Release", "MinSizeRel", "RelWithDebInfo", ""):
        # RelWithDebInfo == -O2 -g -DNDEBUG
        return "dummy-opt"
    # nothing matched: keep the historical placeholder value
    return "None"
1547 
1548 
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    # local renamed from 'platform' to avoid shadowing the platform module
    platform_id = GetPlatform(self)
    return platform_id.startswith("win") or "winxp" in platform_id
1558 
1559 
1561  def __call__(self, ref, out, result, detailed=True):
1562  """Validate JSON output.
1563  returns -- A list of strings giving causes of failure."""
1564 
1565  causes = []
1566  try:
1567  with open(ref) as f:
1568  expected = json.load(f)
1569  except json.JSONDecodeError as err:
1570  causes.append("json parser error")
1571  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1572  return causes
1573 
1574  if not detailed:
1575  if expected != out:
1576  causes.append("json content")
1577  result["json_diff"] = "detailed diff was turned off"
1578  return causes
1579 
1580  # piggyback on TestCase dict diff report
1581  t = TestCase()
1582  # sort both lists (these are list of entities) as the order is not supposed to matter
1583  # indeed, the JSONSink implementation does not garantee any particular order
1584  # but as JSON does not have sets, we get back a sorted list here
1585  expected = sorted(expected, key=lambda item: (item["component"], item["name"]))
1586  out = sorted(out, key=lambda item: (item["component"], item["name"]))
1587  try:
1588  t.assertEqual(expected, out)
1589  except AssertionError as err:
1590  causes.append("json content")
1591  result["json_diff"] = str(err).splitlines()[0]
1592 
1593  return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1239
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:122
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:1054
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:110
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:917
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:281
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:844
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:871
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:67
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:1037
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:932
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:1034
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1349
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:1039
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:125
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:542
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:50
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:103
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:108
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:870
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:112
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1242
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:584
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1334
GaudiPartProp.decorators.get
get
decorate the vector of properties
Definition: decorators.py:283
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:873
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:127
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:32
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:128
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:924
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:114
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:1036
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:113
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:981
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:77
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:853
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:952
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:667
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:948
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:105
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:1000
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:126
compareOutputFiles.target
target
Definition: compareOutputFiles.py:489
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:1057
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:979
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:107
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:123
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:921
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:955
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:117
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:983
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1240
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1430
GaudiTesting.BaseTest.BaseTest.validate_time
validate_time
Definition: BaseTest.py:130
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:996
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1549
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:967
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:129
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:798
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:1055
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:115
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:841
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:982
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:124
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:966
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:875
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1301
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:842
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:121
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1560
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:1010
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:950
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:721
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:116
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:1049
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:395
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite)
Definition: BaseTest.py:1238
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:781
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:962
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1237
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:32
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:106
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:949
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1244
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:872
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:944
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1241
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:980
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:997
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1498
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:848
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:496
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:102
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:452
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:109
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:388
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1561
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:869
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:963
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:827
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:132
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1279
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:893
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:1002
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:507
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:111
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:1035
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:969
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:773
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:118
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1521
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:120
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: details.h:97
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:985