Loading [MathJax]/extensions/tex2jax.js
The Gaudi Framework  v36r7 (7f57a304)
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
BaseTest.py
Go to the documentation of this file.
1 # -*- coding: utf-8 -*-
2 
12 
import errno
import inspect
import json
import logging
import os
import platform
import re
import signal
import sys
import tempfile
import threading
import time
from subprocess import PIPE, STDOUT, Popen
from unittest import TestCase
26 
27 try:
28  from html import escape as escape_for_html
29 except ImportError: # Python2
30  from cgi import escape as escape_for_html
31 
32 import six
33 
34 if sys.version_info < (3, 5):
35  # backport of 'backslashreplace' handling of UnicodeDecodeError
36  # to Python < 3.5
37  from codecs import backslashreplace_errors, register_error
38 
40  if isinstance(exc, UnicodeDecodeError):
41  code = hex(ord(exc.object[exc.start]))
42  return ("\\" + code[1:], exc.start + 1)
43  else:
44  return backslashreplace_errors(exc)
45 
46  register_error("backslashreplace", _new_backslashreplace_errors)
47  del register_error
48  del backslashreplace_errors
49  del _new_backslashreplace_errors
50 
51 SKIP_RETURN_CODE = 77
52 
53 
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    # characters not allowed in XML 1.0 text: C0 controls (except tab/LF/CR),
    # surrogates and the non-characters U+FFFE/U+FFFF
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")

    def quote(match):
        "helper function"
        # %02X (zero padded): the original '%2X' space-padded codepoints
        # below 0x10, yielding malformed markers like '[NON-XML-CHAR-0x B]'
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
69 
70 
def dumpProcs(name):
    """helper to debug GAUDI-1084, dump the list of processes"""
    from getpass import getuser

    # only meaningful inside a CI job that defines WORKSPACE
    if "WORKSPACE" not in os.environ:
        return
    listing = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE)
    dump_path = os.path.join(os.environ["WORKSPACE"], name)
    with open(dump_path, "wb") as dump:
        dump.write(listing.communicate()[0])
79 
80 
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).

    ppid -- pid of the root of the process tree
    sig  -- signal number to deliver (e.g. signal.SIGTERM)
    """
    log = logging.getLogger("kill_tree")
    ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
    children = map(int, get_children.communicate()[0].split())
    # depth first: kill the leaves before their parent
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        # symbolic constant instead of the magic number 3
        if err.errno != errno.ESRCH:  # No such process
            raise
        log.debug("no such process %d", ppid)
99 
100 
101 # -------------------------------------------------------------------------#
102 
103 
104 class BaseTest(object):
105 
106  _common_tmpdir = None
107 
108  def __init__(self):
109  self.program = ""
110  self.args = []
111  self.reference = ""
112  self.error_reference = ""
113  self.options = ""
114  self.stderr = ""
115  self.timeout = 600
116  self.exit_code = None
117  self.environment = dict(os.environ)
119  self.signal = None
120  self.workdir = os.curdir
121  self.use_temp_dir = False
122  # Variables not for users
123  self.status = None
124  self.name = ""
125  self.causes = []
126  self.result = Result(self)
127  self.returnedCode = 0
128  self.out = ""
129  self.err = ""
130  self.proc = None
131  self.stack_trace = None
132  self.basedir = os.getcwd()
133 
    def run(self):
        """
        Run the test program with timeout handling, validate its output and
        return a dictionary describing the outcome (exit code, stdout/err,
        causes of failure, validator annotations, ...).
        """
        logging.debug("running test %s", self.name)

        # NOTE(review): this dict is passed positionally as Result's 'kind'
        # parameter, which Result.__init__ ignores — the annotations start
        # empty regardless; confirm intent against Result's signature.
        self.result = Result(
            {
                "CAUSE": None,
                "EXCEPTION": None,
                "RESOURCE": None,
                "TARGET": None,
                "TRACEBACK": None,
                "START_TIME": None,
                "END_TIME": None,
                "TIMEOUT_DETAIL": None,
            }
        )

        if self.options:
            # guess the language of the inline options snippet from its imports
            if re.search(
                r"from\s+Gaudi.Configuration\s+import\s+\*|"
                "from\s+Configurables\s+import",
                self.options,
            ):
                suffix, lang = ".py", "python"
            else:
                suffix, lang = ".opts", "c++"
            self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                lang, escape_for_html(self.options)
            )
            # write the options to a temporary file passed as extra argument
            optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
            optionFile.file.write(self.options.encode("utf-8"))
            optionFile.seek(0)
            self.args.append(RationalizePath(optionFile.name))

        platform_id = (
            self.environment.get("BINARY_TAG")
            or self.environment.get("CMTCONFIG")
            or platform.platform()
        )
        # If at least one regex matches we skip the test.
        skip_test = bool(
            [
                None
                for prex in self.unsupported_platforms
                if re.search(prex, platform_id)
            ]
        )

        if not skip_test:
            # handle working/temporary directory options
            workdir = self.workdir
            if self.use_temp_dir:
                if self._common_tmpdir:
                    workdir = self._common_tmpdir
                else:
                    workdir = tempfile.mkdtemp()

            # prepare the command to execute
            prog = ""
            if self.program != "":
                prog = self.program
            elif "GAUDIEXE" in self.environment:
                prog = self.environment["GAUDIEXE"]
            else:
                prog = "Gaudi.exe"

            prog_ext = os.path.splitext(prog)[1]
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = list(map(RationalizePath, self.args))

            if prog_ext == ".py":
                params = ["python", RationalizePath(prog)] + args
            else:
                params = [RationalizePath(prog)] + args

            # we need to switch directory because the validator expects to run
            # in the same dir as the program
            os.chdir(workdir)

            # launching test in a different thread to handle timeout exception
            def target():
                logging.debug("executing %r in %s", params, workdir)
                self.proc = Popen(
                    params, stdout=PIPE, stderr=PIPE, env=self.environment
                )
                logging.debug("(pid: %d)", self.proc.pid)
                out, err = self.proc.communicate()
                self.out = out.decode("utf-8", errors="backslashreplace")
                self.err = err.decode("utf-8", errors="backslashreplace")

            thread = threading.Thread(target=target)
            thread.start()
            # catching timeout
            thread.join(self.timeout)

            if thread.is_alive():
                logging.debug("time out in test %s (pid %d)", self.name, self.proc.pid)
                # get the stack trace of the stuck process
                cmd = [
                    "gdb",
                    "--pid",
                    str(self.proc.pid),
                    "--batch",
                    "--eval-command=thread apply all backtrace",
                ]
                gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                self.stack_trace = gdb.communicate()[0].decode(
                    "utf-8", errors="backslashreplace"
                )

                # terminate politely, then force-kill whatever survived
                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)
                self.causes.append("timeout")
            else:
                self.returnedCode = self.proc.returncode
                if self.returnedCode != SKIP_RETURN_CODE:
                    logging.debug(
                        f"completed test {self.name} with returncode = {self.returnedCode}"
                    )
                    logging.debug("validating test...")
                    self.result, self.causes = self.ValidateOutput(
                        stdout=self.out, stderr=self.err, result=self.result
                    )
                else:
                    logging.debug(f"skipped test {self.name}")
                    self.status = "skipped"

            # remove the temporary directory if we created it
            if self.use_temp_dir and not self._common_tmpdir:
                shutil.rmtree(workdir, True)

            os.chdir(self.basedir)

            if self.status != "skipped":
                # handle application exit code
                if self.signal is not None:
                    # a test expected to die by signal N exits with code -N
                    if int(self.returnedCode) != -int(self.signal):
                        self.causes.append("exit code")

                elif self.exit_code is not None:
                    if int(self.returnedCode) != int(self.exit_code):
                        self.causes.append("exit code")

                elif self.returnedCode != 0:
                    self.causes.append("exit code")

                if self.causes:
                    self.status = "failed"
                else:
                    self.status = "passed"

        else:
            self.status = "skipped"

        logging.debug("%s: %s", self.name, self.status)
        # map report field names to the attributes that provide their values
        field_mapping = {
            "Exit Code": "returnedCode",
            "stderr": "err",
            "Arguments": "args",
            "Runtime Environment": "environment",
            "Status": "status",
            "stdout": "out",
            "Program Name": "program",
            "Name": "name",
            "Validator": "validator",
            "Output Reference File": "reference",
            "Error Reference File": "error_reference",
            "Causes": "causes",
            # 'Validator Result': 'result.annotations',
            "Unsupported Platforms": "unsupported_platforms",
            "Stack Trace": "stack_trace",
        }
        # only include fields with a truthy value
        resultDict = [
            (key, getattr(self, attr))
            for key, attr in field_mapping.items()
            if getattr(self, attr)
        ]
        resultDict.append(
            (
                "Working Directory",
                RationalizePath(os.path.join(os.getcwd(), self.workdir)),
            )
        )
        # print(dict(resultDict).keys())
        resultDict.extend(self.result.annotations.items())
        # print(self.result.annotations.keys())
        resultDict = dict(resultDict)

        # Special cases
        if "Validator" in resultDict:
            resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                "python", escape_for_html(resultDict["Validator"])
            )
        return resultDict
334 
335  # -------------------------------------------------#
336  # ----------------Validating tool------------------#
337  # -------------------------------------------------#
338 
339  def ValidateOutput(self, stdout, stderr, result):
340  if not self.stderr:
341  self.validateWithReference(stdout, stderr, result, self.causes)
342  elif stderr.strip() != self.stderr.strip():
343  self.causes.append("standard error")
344  return result, self.causes
345 
347  self,
348  reference=None,
349  stdout=None,
350  result=None,
351  causes=None,
352  signature_offset=0,
353  signature=None,
354  id=None,
355  ):
356  """
357  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
358  """
359 
360  if reference is None:
361  reference = self.reference
362  if stdout is None:
363  stdout = self.out
364  if result is None:
365  result = self.result
366  if causes is None:
367  causes = self.causes
368 
369  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
370  if not reflines:
371  raise RuntimeError("Empty (or null) reference")
372  # the same on standard output
373  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
374 
375  res_field = "GaudiTest.RefBlock"
376  if id:
377  res_field += "_%s" % id
378 
379  if signature is None:
380  if signature_offset < 0:
381  signature_offset = len(reference) + signature_offset
382  signature = reflines[signature_offset]
383  # find the reference block in the output file
384  try:
385  pos = outlines.index(signature)
386  outlines = outlines[
387  pos - signature_offset : pos + len(reflines) - signature_offset
388  ]
389  if reflines != outlines:
390  msg = "standard output"
391  # I do not want 2 messages in causes if the function is called
392  # twice
393  if not msg in causes:
394  causes.append(msg)
395  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
396  except ValueError:
397  causes.append("missing signature")
398  result[res_field + ".signature"] = result.Quote(signature)
399  if len(reflines) > 1 or signature != reflines[0]:
400  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
401  return causes
402 
404  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
405  ):
406  """
407  Count the number of messages with required severity (by default ERROR and FATAL)
408  and check if their numbers match the expected ones (0 by default).
409  The dictionary "expected" can be used to tune the number of errors and fatals
410  allowed, or to limit the number of expected warnings etc.
411  """
412 
413  if stdout is None:
414  stdout = self.out
415  if result is None:
416  result = self.result
417  if causes is None:
418  causes = self.causes
419 
420  # prepare the dictionary to record the extracted lines
421  errors = {}
422  for sev in expected:
423  errors[sev] = []
424 
425  outlines = stdout.splitlines()
426  from math import log10
427 
428  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
429 
430  linecount = 0
431  for l in outlines:
432  linecount += 1
433  words = l.split()
434  if len(words) >= 2 and words[1] in errors:
435  errors[words[1]].append(fmt % (linecount, l.rstrip()))
436 
437  for e in errors:
438  if len(errors[e]) != expected[e]:
439  causes.append("%s(%d)" % (e, len(errors[e])))
440  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
441  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
442  str(expected[e])
443  )
444 
445  return causes
446 
448  self,
449  stdout=None,
450  result=None,
451  causes=None,
452  trees_dict=None,
453  ignore=r"Basket|.*size|Compression",
454  ):
455  """
456  Compare the TTree summaries in stdout with the ones in trees_dict or in
457  the reference file. By default ignore the size, compression and basket
458  fields.
459  The presence of TTree summaries when none is expected is not a failure.
460  """
461  if stdout is None:
462  stdout = self.out
463  if result is None:
464  result = self.result
465  if causes is None:
466  causes = self.causes
467  if trees_dict is None:
468  lreference = self._expandReferenceFileName(self.reference)
469  # call the validator if the file exists
470  if lreference and os.path.isfile(lreference):
471  trees_dict = findTTreeSummaries(open(lreference).read())
472  else:
473  trees_dict = {}
474 
475  from pprint import PrettyPrinter
476 
477  pp = PrettyPrinter()
478  if trees_dict:
479  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
480  if ignore:
481  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
482 
483  trees = findTTreeSummaries(stdout)
484  failed = cmpTreesDicts(trees_dict, trees, ignore)
485  if failed:
486  causes.append("trees summaries")
487  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
488  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
489  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
490 
491  return causes
492 
494  self, stdout=None, result=None, causes=None, dict=None, ignore=None
495  ):
496  """
497  Compare the TTree summaries in stdout with the ones in trees_dict or in
498  the reference file. By default ignore the size, compression and basket
499  fields.
500  The presence of TTree summaries when none is expected is not a failure.
501  """
502  if stdout is None:
503  stdout = self.out
504  if result is None:
505  result = self.result
506  if causes is None:
507  causes = self.causes
508 
509  if dict is None:
510  lreference = self._expandReferenceFileName(self.reference)
511  # call the validator if the file exists
512  if lreference and os.path.isfile(lreference):
513  dict = findHistosSummaries(open(lreference).read())
514  else:
515  dict = {}
516 
517  from pprint import PrettyPrinter
518 
519  pp = PrettyPrinter()
520  if dict:
521  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
522  if ignore:
523  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
524 
525  histos = findHistosSummaries(stdout)
526  failed = cmpTreesDicts(dict, histos, ignore)
527  if failed:
528  causes.append("histos summaries")
529  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
530  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
531  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
532 
533  return causes
534 
536  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
537  ):
538  """
539  Default validation acti*on: compare standard output and error to the
540  reference files.
541  """
542 
543  if stdout is None:
544  stdout = self.out
545  if stderr is None:
546  stderr = self.err
547  if result is None:
548  result = self.result
549  if causes is None:
550  causes = self.causes
551 
552  # set the default output preprocessor
553  if preproc is None:
554  preproc = normalizeExamples
555  # check standard output
556  lreference = self._expandReferenceFileName(self.reference)
557  # call the validator if the file exists
558  if lreference and os.path.isfile(lreference):
559  causes += ReferenceFileValidator(
560  lreference, "standard output", "Output Diff", preproc=preproc
561  )(stdout, result)
562  elif lreference:
563  causes += ["missing reference file"]
564  # Compare TTree summaries
565  causes = self.CheckTTreesSummaries(stdout, result, causes)
566  causes = self.CheckHistosSummaries(stdout, result, causes)
567  if causes and lreference: # Write a new reference file for stdout
568  try:
569  cnt = 0
570  newrefname = ".".join([lreference, "new"])
571  while os.path.exists(newrefname):
572  cnt += 1
573  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
574  newref = open(newrefname, "w")
575  # sanitize newlines
576  for l in stdout.splitlines():
577  newref.write(l.rstrip() + "\n")
578  del newref # flush and close
579  result["New Output Reference File"] = os.path.relpath(
580  newrefname, self.basedir
581  )
582  except IOError:
583  # Ignore IO errors when trying to update reference files
584  # because we may be in a read-only filesystem
585  pass
586 
587  # check standard error
588  lreference = self._expandReferenceFileName(self.error_reference)
589  # call the validator if we have a file to use
590  if lreference:
591  if os.path.isfile(lreference):
592  newcauses = ReferenceFileValidator(
593  lreference, "standard error", "Error Diff", preproc=preproc
594  )(stderr, result)
595  else:
596  newcauses = ["missing error reference file"]
597  causes += newcauses
598  if newcauses and lreference: # Write a new reference file for stdedd
599  cnt = 0
600  newrefname = ".".join([lreference, "new"])
601  while os.path.exists(newrefname):
602  cnt += 1
603  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
604  newref = open(newrefname, "w")
605  # sanitize newlines
606  for l in stderr.splitlines():
607  newref.write(l.rstrip() + "\n")
608  del newref # flush and close
609  result["New Error Reference File"] = os.path.relpath(
610  newrefname, self.basedir
611  )
612  else:
613  causes += BasicOutputValidator(
614  lreference, "standard error", "ExecTest.expected_stderr"
615  )(stderr, result)
616  return causes
617 
619  self,
620  output_file,
621  reference_file,
622  result=None,
623  causes=None,
624  detailed=True,
625  ):
626  """
627  JSON validation action: compare json file to reference file
628  """
629 
630  if result is None:
631  result = self.result
632  if causes is None:
633  causes = self.causes
634 
635  if not os.path.isfile(output_file):
636  causes.append(f"output file {output_file} does not exist")
637  return causes
638 
639  try:
640  with open(output_file) as f:
641  output = json.load(f)
642  except json.JSONDecodeError as err:
643  causes.append("json parser error")
644  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
645  return causes
646 
647  lreference = self._expandReferenceFileName(reference_file)
648  if not lreference:
649  causes.append("reference file not set")
650  elif not os.path.isfile(lreference):
651  causes.append("reference file does not exist")
652  else:
653  causes += JSONOutputValidator()(lreference, output, result, detailed)
654  if causes and lreference: # Write a new reference file for output
655  try:
656  cnt = 0
657  newrefname = ".".join([lreference, "new"])
658  while os.path.exists(newrefname):
659  cnt += 1
660  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
661  with open(newrefname, "w") as newref:
662  json.dump(output, newref, indent=4)
663  result["New JSON Output Reference File"] = os.path.relpath(
664  newrefname, self.basedir
665  )
666  except IOError:
667  # Ignore IO errors when trying to update reference files
668  # because we may be in a read-only filesystem
669  pass
670  return causes
671 
    def _expandReferenceFileName(self, reffile):
        """
        Expand environment variables in a reference file name and pick the
        best-matching platform-specific variant of the file, if one exists.

        reffile -- reference file name (may contain environment variables);
                   falsy values are returned as ""
        returns -- path of the reference file to use
        """
        # if no file is passed, do nothing
        if not reffile:
            return ""

        # function to split an extension in constituents parts
        def platformSplit(p):
            import re

            # '-'/'+' separators for new-style tags, '_' for old-style ones
            delim = re.compile("-" in p and r"[-+]" or r"_")
            return set(delim.split(p))

        reference = os.path.normpath(
            os.path.join(self.basedir, os.path.expandvars(reffile))
        )

        # old-style platform-specific reference name
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:  # look for new-style platform specific reference files:
            # get all the files whose name start with the reference filename
            dirname, basename = os.path.split(reference)
            if not dirname:
                dirname = "."
            head = basename + "."
            head_len = len(head)
            platform = platformSplit(GetPlatform(self))
            if "do0" in platform:
                # 'do0' builds behave as debug builds for reference matching
                platform.add("dbg")
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:  # take the one with highest matching
                # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
                # has to use ref.x86_64-gcc43 or ref.slc5-dbg
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
714 
715 
716 # ======= GAUDI TOOLS =======
717 
718 import calendar
719 import difflib
720 import shutil
721 import string
722 
723 try:
724  from GaudiKernel import ROOT6WorkAroundEnabled
725 except ImportError:
726 
728  # dummy implementation
729  return False
730 
731 
732 # --------------------------------- TOOLS ---------------------------------#
733 
734 
def RationalizePath(p):
    """
    Function used to normalize the used path
    """
    expanded = os.path.normpath(os.path.expandvars(p))
    # resolve symlinks only for paths that actually exist; a non-existing
    # path is handed back exactly as received
    if os.path.exists(expanded):
        return os.path.realpath(expanded)
    return p
743 
744 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe' suffix.
    If the executable cannot be found, None is returned
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                if os.path.isfile(executable[:-4]):
                    return executable[:-4]
            else:
                # fall back to searching just the base name in $PATH
                executable = os.path.split(executable)[1]
        else:
            return executable
    # default to "" so an unset $PATH does not crash with AttributeError
    # (os.environ.get("PATH") would return None)
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            return fullpath[:-4]
    return None
767 
768 
769 # -------------------------------------------------------------------------#
770 # ----------------------------- Result Classe -----------------------------#
771 # -------------------------------------------------------------------------#
772 import types
773 
774 
class Result:
    """Mapping-like container for the outcome annotations of a test."""

    # outcome constants
    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"

    # names of the standard annotation fields (default to empty strings)
    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        # NOTE(review): kind, id and outcome are accepted but never used;
        # only the annotations mapping is retained.  It is copied, so the
        # shared default dict is never mutated.
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        # annotations are accessed dict-style; keys must be strings
        assert isinstance(key, six.string_types)
        return self.annotations[key]

    def __setitem__(self, key, value):
        # both key and value must be strings (values are HTML snippets)
        assert isinstance(key, six.string_types)
        assert isinstance(value, six.string_types), "{!r} is not a string".format(value)
        self.annotations[key] = value

    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return "<pre>{}</pre>".format(escape_for_html(text))
807 
808 
809 # -------------------------------------------------------------------------#
810 # --------------------------- Validator Classes ---------------------------#
811 # -------------------------------------------------------------------------#
812 
813 # Basic implementation of an option validator for Gaudi test. This
814 # implementation is based on the standard (LCG) validation functions used
815 # in QMTest.
816 
817 
    def __init__(self, ref, cause, result_key):
        # ref        -- expected (reference) text
        # cause      -- failure-cause string reported on mismatch
        # result_key -- result annotation key under which the reference is stored
        self.ref = ref
        self.cause = cause
        self.result_key = result_key
823 
    def __call__(self, out, result):
        """Validate the output of the program.

        'out' -- A string containing the data written to the stream under
        test (standard output or standard error).
        'result' -- A 'Result' object; annotated with the reference text
        when the comparison fails.
        returns -- A list of strings giving causes of failure."""

        causes = []
        # Check the output
        if not self.__CompareText(out, self.ref):
            causes.append(self.cause)
            result[self.result_key] = result.Quote(self.ref)

        return causes
841 
    def __CompareText(self, s1, s2):
        """Compare 's1' and 's2', ignoring line endings.
        's1' -- A string.
        's2' -- A string.
        returns -- True if 's1' and 's2' are the same, ignoring
        differences in line endings."""
        if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
            # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
            # can fix them
            to_ignore = re.compile(
                r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
            )

            def keep_line(l):
                return not to_ignore.match(l)

            # compare line-by-line after dropping the ignorable warnings
            return list(filter(keep_line, s1.splitlines())) == list(
                filter(keep_line, s2.splitlines())
            )
        else:
            return s1.splitlines() == s2.splitlines()
863 
864 
865 # ------------------------ Preprocessor elements ------------------------#
867  """Base class for a callable that takes a file and returns a modified
868  version of it."""
869 
    def __processLine__(self, line):
        # identity by default; subclasses override to transform or drop lines
        return line
872 
    def __processFile__(self, lines):
        # apply __processLine__ to every line, dropping falsy results
        output = []
        for l in lines:
            l = self.__processLine__(l)
            if l:
                output.append(l)
        return output
880 
    def __call__(self, input):
        # accept either a list of lines or a single string; when a string is
        # given, return a string again (mergeback)
        if not isinstance(input, six.string_types):
            lines = input
            mergeback = False
        else:
            lines = input.splitlines()
            mergeback = True
        output = self.__processFile__(lines)
        if mergeback:
            output = "\n".join(output)
        return output
892 
    def __add__(self, rhs):
        # chaining two preprocessors yields a sequence applying both in order
        return FilePreprocessorSequence([self, rhs])
895 
896 
898  def __init__(self, members=[]):
899  self.members = members
900 
    def __add__(self, rhs):
        # extend the chain with one more preprocessor (new object, no mutation)
        return FilePreprocessorSequence(self.members + [rhs])
903 
    def __call__(self, input):
        # pipe the input through every member, in order
        output = input
        for pp in self.members:
            output = pp(output)
        return output
909 
910 
912  def __init__(self, strings=[], regexps=[]):
913  import re
914 
915  self.strings = strings
916  self.regexps = list(map(re.compile, regexps))
917 
    def __processLine__(self, line):
        # drop the line if it contains any of the plain substrings...
        for s in self.strings:
            if line.find(s) >= 0:
                return None
        # ...or matches any of the regexps
        for r in self.regexps:
            if r.search(line):
                return None
        return line
926 
927 
    def __init__(self, start, end):
        # start -- substring marking the first line of the region to drop
        # end   -- substring marking where skipping stops
        self.start = start
        self.end = end
        # stateful: True while inside a skipped region
        self._skipping = False
933 
    def __processLine__(self, line):
        if self.start in line:
            # the start-marker line itself is dropped too
            self._skipping = True
            return None
        elif self.end in line:
            # the end-marker line is kept (falls through to 'return line')
            self._skipping = False
        elif self._skipping:
            return None
        return line
943 
944 
    def __init__(self, orig, repl="", when=None):
        # when -- optional guard regexp: the substitution is applied only to
        #         lines matching it (None means "always")
        if when:
            when = re.compile(when)
        # a flat list of (guard, pattern, replacement) operations so that
        # replacers can be merged by __add__
        self._operations = [(when, re.compile(orig), repl)]
950 
    def __add__(self, rhs):
        if isinstance(rhs, RegexpReplacer):
            # merging two replacers keeps one flat operation list instead of
            # nesting preprocessor sequences
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res
958 
    def __processLine__(self, line):
        # apply every (guard, pattern, replacement) operation in order
        for w, o, r in self._operations:
            if w is None or w.search(line):
                line = o.sub(r, line)
        return line
964 
965 
# Common preprocessors
# mask heap addresses, which differ between runs
maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
# replace timestamps with a fixed epoch date
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01",
)
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"

skipEmptyLines = FilePreprocessor()
# FIXME: that's ugly
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
978 
979 # Special preprocessor sorting the list of strings (whitespace separated)
980 # that follow a signature on a single line
981 
982 
    def __init__(self, signature):
        # signature -- marker substring; the whitespace-separated tokens that
        #              follow it on a line get sorted
        self.signature = signature
        self.siglen = len(signature)
987 
988  def __processLine__(self, line):
989  pos = line.find(self.signature)
990  if pos >= 0:
991  line = line[: (pos + self.siglen)]
992  lst = line[(pos + self.siglen) :].split()
993  lst.sort()
994  line += " ".join(lst)
995  return line
996 
997 
999  """
1000  Sort group of lines matching a regular expression
1001  """
1002 
    def __init__(self, exp):
        # accept either an already-compiled pattern or a regexp source string
        self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1005 
1006  def __processFile__(self, lines):
1007  match = self.exp.match
1008  output = []
1009  group = []
1010  for l in lines:
1011  if match(l):
1012  group.append(l)
1013  else:
1014  if group:
1015  group.sort()
1016  output.extend(group)
1017  group = []
1018  output.append(l)
1019  return output
1020 
1021 
# Preprocessors for GaudiExamples
# start from the pointer/date maskers and stack one RegexpReplacer per rule;
# each tuple is (guard-regexp-or-None, pattern, replacement)
normalizeExamples = maskPointers + normalizeDate
for w, o, r in [
    # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
    ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"file \1",
    ),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # Absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # Ignore count of declared properties (anyway they are all printed)
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property \['Name': Value\]", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
]:  # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
    normalizeExamples += RegexpReplacer(o, r, w)
1062 
# Lines dropped entirely from the output before the reference comparison:
# the first list contains plain substrings, `regexps` regular expressions.
lineSkipper = LineSkipper(
    [
        "//GP:",
        "JobOptionsSvc INFO # ",
        "JobOptionsSvc WARNING # ",
        "Time User",
        "Welcome to",
        "This machine has a speed",
        "TIME:",
        "running on",
        "ToolSvc.Sequenc... INFO",
        "DataListenerSvc INFO XML written to file:",
        "[INFO]",
        "[WARNING]",
        "DEBUG No writable file catalog found which contains FID:",
        "DEBUG Service base class initialized successfully",
        # changed between v20 and v21
        "DEBUG Incident timing:",
        # introduced with patch #3487
        # changed the level of the message from INFO to
        # DEBUG
        "INFO 'CnvServices':[",
        # message removed because could be printed in constructor
        "DEBUG 'CnvServices':[",
        # The signal handler complains about SIGXCPU not
        # defined on some platforms
        "SIGXCPU",
        # Message removed with redesign of JobOptionsSvc
        "ServiceLocatorHelper::service: found service JobOptionsSvc",
        # Ignore warnings for properties case mismatch
        "mismatching case for property name:",
        # Message demoted to DEBUG in gaudi/Gaudi!992
        "Histograms saving not required.",
        # Message added in gaudi/Gaudi!577
        "Properties are dumped into",
    ],
    regexps=[
        r"^JobOptionsSvc INFO *$",
        r"^# ",  # Ignore python comments
        # skip the message reporting the version of the root file
        r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
        r"File '.*.xml' does not exist",
        r"INFO Refer to dataset .* by its file ID:",
        r"INFO Referring to dataset .* by its file ID:",
        r"INFO Disconnect from dataset",
        r"INFO Disconnected from dataset",
        r"INFO Disconnected data IO:",
        r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
        # Ignore StatusCodeSvc related messages
        r".*StatusCodeSvc.*",
        r".*StatusCodeCheck.*",
        r"Num\s*\|\s*Function\s*\|\s*Source Library",
        r"^[-+]*\s*$",
        # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
        r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
        # Hide unchecked StatusCodes from dictionaries
        r"^ +[0-9]+ \|.*ROOT",
        r"^ +[0-9]+ \|.*\|.*Dict",
        # Hide EventLoopMgr total timing report
        r"EventLoopMgr.*---> Loop Finished",
        r"HiveSlimEventLo.*---> Loop Finished",
        # Remove ROOT TTree summary table, which changes from one version to the
        # other
        r"^\*.*\*$",
        # Remove Histos Summaries
        r"SUCCESS\s*Booked \d+ Histogram\(s\)",
        r"^ \|",
        r"^ ID=",
        # Ignore added/removed properties
        r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
        r"Property(.*)'Audit(Begin|End)Run':",
        # these were missing in tools
        r"Property(.*)'AuditRe(start|initialize)':",
        r"Property(.*)'Blocking':",
        # removed with gaudi/Gaudi!273
        r"Property(.*)'ErrorCount(er)?':",
        # added with gaudi/Gaudi!306
        r"Property(.*)'Sequential':",
        # added with gaudi/Gaudi!314
        r"Property(.*)'FilterCircularDependencies':",
        # removed with gaudi/Gaudi!316
        r"Property(.*)'IsClonable':",
        # ignore uninteresting/obsolete messages
        r"Property update for OutputLevel : new value =",
        r"EventLoopMgr\s*DEBUG Creating OutputStream",
    ],
)
1150 
if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
    # fix them
    lineSkipper += LineSkipper(
        regexps=[
            r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
        ]
    )

# Default preprocessor chain used by the validators below: drop noise lines,
# apply the replacements defined above, remove empty lines, normalize line
# endings, and sort blocks whose line order is not reproducible.
normalizeExamples = (
    lineSkipper
    + normalizeExamples
    + skipEmptyLines
    + normalizeEOL
    + LineSorter("Services to release : ")
    + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
)
1168 
1169 # --------------------- Validation functions/classes ---------------------#
1170 
1171 
class ReferenceFileValidator:
    """
    Compare the stdout of a test against a reference file, after passing both
    through a preprocessor chain, and record the differences in the result.
    """

    def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
        """
        reffile    -- path to the reference file (environment variables are expanded)
        cause      -- failure cause string appended to the causes on mismatch
        result_key -- key under which the diff is stored in the result object
        preproc    -- preprocessor applied to both reference and output lines
        """
        self.reffile = os.path.expandvars(reffile)
        self.cause = cause
        self.result_key = result_key
        self.preproc = preproc

    def __call__(self, stdout, result):
        """Validate stdout against the reference; return the list of failure causes."""
        causes = []
        if os.path.isfile(self.reffile):
            # FIX: use a context manager so the reference file is closed promptly
            # (the original left the file object to the garbage collector)
            with open(self.reffile) as ref_file:
                orig = ref_file.readlines()
            if self.preproc:
                orig = self.preproc(orig)
                result[self.result_key + ".preproc.orig"] = result.Quote(
                    "\n".join(map(str.strip, orig))
                )
        else:
            # a missing reference file is compared against an empty one
            orig = []
        new = stdout.splitlines()
        if self.preproc:
            new = self.preproc(new)

        diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
        # keep only the lines that actually differ ('-' / '+' / '?' markers)
        filterdiffs = [x.strip() for x in diffs if x[0] != " "]
        if filterdiffs:
            result[self.result_key] = result.Quote("\n".join(filterdiffs))
            result[self.result_key] += result.Quote(
                """
                Legend:
                -) reference file
                +) standard output of the test"""
            )
            result[self.result_key + ".preproc.new"] = result.Quote(
                "\n".join(map(str.strip, new))
            )
            causes.append(self.cause)
        return causes
1211 
1212 
def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.

    Returns a dict mapping each tree name to the dictionary produced by
    _parseTTreeSummary for that summary table.
    """
    # a summary table starts with a full line of '*'
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:  # loop over the output
        # advance to the next row of stars (start of a summary table)
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            # digest the table and resume after it
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
1233 
1234 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found.

    reference -- dict of expected values
    to_check  -- dict to compare (missing keys are added with value None)
    ignore    -- optional regexp: reference keys matching it are skipped

    Returns the path (list of keys) leading to the first differing value,
    or an empty list if no difference is found.
    """
    # select which keys of the reference dictionary have to be checked
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    for key in keys:
        if key not in to_check:
            # a missing key is a failure; flag it with None in to_check
            to_check[key] = None
            return [key]
        ref_value = reference[key]
        chk_value = to_check[key]
        if type(ref_value) is dict and type(chk_value) is dict:
            # both values are dictionaries: recurse and prepend this key
            sub_path = cmpTreesDicts(ref_value, chk_value, ignore)
            if sub_path:
                return [key] + sub_path
        elif chk_value != ref_value:
            return [key]
    return []  # no difference found
1266 
1267 
def getCmpFailingValues(reference, to_check, fail_path):
    """
    Follow fail_path (as returned by cmpTreesDicts) into both dictionaries.

    Returns the tuple (fail_path, reference value, checked value) at the
    deepest reachable level; descent stops early if either side is missing.
    """
    chk_value = to_check
    ref_value = reference
    for key in fail_path:
        chk_value = chk_value.get(key, None)
        ref_value = ref_value.get(key, None)
        if chk_value is None or ref_value is None:
            # one of the dictionaries is not deep enough
            break
    return (fail_path, ref_value, chk_value)
1277 
1278 
# signature of the print-out of the histograms
# groups: (1) message preamble/source name, (2) number of histograms booked,
#         (3) trailing "key=value" counters
h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1281 
1282 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested informations and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        # split a table row "*col0 : col1 : col2 *" into at most 3 stripped fields
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        # digest one 3-line block: name/title, entries + sizes, baskets/compression
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # tree resides only in memory: no file size is reported
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])
        return r

    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i : i + 3])
        result["Branches"] = {}
        i += 4  # skip the 3 parsed lines plus the '*' separator row
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            branch = parseblock(lines[i : i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 4

    return (result, i)
1333 
1334 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.
    Returns a tuple (summary dict, position of the first line after the
    summary block).
    """
    global h_count_re
    h_table_head = re.compile(
        r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
    )
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            t = t.replace(" profile", "Prof")
            pos += 1
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format: a header row of titles followed by " |" rows
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # compact format: one " ID=..." line per histogram
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
            if not d in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1401 
1402 
def findHistosSummaries(stdout):
    """
    Scan stdout to find histogram summaries and digest them.

    Returns a dict merging the summaries returned by parseHistosSummary for
    every histogram block found in the output.
    (Docstring fixed: the original said "TTree summaries", copied from
    findTTreeSummaries.)
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}

    pos = 0
    while pos < nlines:
        summ = {}
        # find first line of block:
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            # digest the block and resume after it
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
1424 
1425 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH
    (checked in that order), falling back on a dummy tag derived from
    ENV_CMAKE_BUILD_TYPE, or "None" if nothing matches.
    (Docstring fixed: the original omitted BINARY_TAG and the build-type
    fallbacks.)
    """
    # first environment variable found wins
    for var in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if var in os.environ:
            return os.environ[var]
    build_type = os.environ.get("ENV_CMAKE_BUILD_TYPE", "")
    if build_type in ("Debug", "FastDebug", "Developer"):
        return "dummy-dbg"
    if build_type in (
        "Release",
        "MinSizeRel",
        "RelWithDebInfo",
        "",
    ):  # RelWithDebInfo == -O2 -g -DNDEBUG
        return "dummy-opt"
    return "None"
1452 
1453 
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    # FIX: renamed the local variable, which shadowed the `platform`
    # module imported at the top of the file
    platform_id = GetPlatform(self)
    return "winxp" in platform_id or platform_id.startswith("win")
1463 
1464 
class JSONOutputValidator:
    """Compare parsed JSON output of a test against a JSON reference file."""

    def __call__(self, ref, out, result, detailed=True):
        """Validate JSON output.

        ref      -- path to the JSON reference file
        out      -- already-parsed JSON output of the test
        result   -- result object; diagnostic entries are stored into it
        detailed -- if False, only report that contents differ

        returns -- A list of strings giving causes of failure."""

        causes = []
        try:
            with open(ref) as f:
                expected = json.load(f)
        except json.JSONDecodeError as err:
            causes.append("json parser error")
            result["reference_parse_error"] = f"json parser error in {ref}: {err}"
            return causes

        if not detailed:
            if expected != out:
                causes.append("json content")
                result["json_diff"] = "detailed diff was turned off"
            return causes

        # piggyback on TestCase dict diff report
        t = TestCase()
        try:
            t.assertEqual(expected, out)
        except AssertionError as err:
            causes.append("json content")
            # only the first line: the full diff can be very long
            result["json_diff"] = str(err).splitlines()[0]

        return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1174
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:125
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:1003
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:113
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:866
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:282
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:793
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:820
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:71
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:986
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:881
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:983
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1283
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:988
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:128
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:493
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:54
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:106
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:111
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:819
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:115
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1177
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:535
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1268
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:822
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:130
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:39
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:131
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:873
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:117
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:985
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:116
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:930
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:81
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:802
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:901
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:618
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:897
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:108
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:949
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:129
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:1006
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:928
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:110
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:126
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:870
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:904
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:120
Gaudi::Functional::details::get
auto get(const Handle &handle, const Algo &, const EventContext &) -> decltype(details::deref(handle.get()))
Definition: FunctionalDetails.h:444
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:932
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1175
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1335
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:945
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1454
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:916
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:132
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:745
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:1004
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:118
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:790
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:931
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:127
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:915
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:824
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1235
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:791
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:124
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1465
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:959
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
Definition: BaseTest.py:1173
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:899
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:672
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:119
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:998
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:346
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:735
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:911
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1172
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:33
gaudirun.type
type
Definition: gaudirun.py:160
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:109
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:898
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1179
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:821
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:893
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1176
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:929
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:946
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1403
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:797
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:447
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:104
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:403
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:112
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:339
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1466
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:818
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:912
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:775
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:134
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1213
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:842
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:951
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:513
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:114
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:984
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:918
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:727
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:121
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1426
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:123
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: FunctionalDetails.h:102
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:934