The Gaudi Framework  v33r1 (b1225454)
BaseTest.py
1 # -*- coding: utf-8 -*-
2 
12 
13 import os
14 import sys
15 import time
16 import signal
17 import threading
18 import platform
19 import tempfile
20 import inspect
21 import re
22 import logging
23 
24 from subprocess import Popen, PIPE, STDOUT
25 
26 import six
27 
28 if sys.version_info < (3, 5):
29  # backport of 'backslashreplace' handling of UnicodeDecodeError
30  # to Python < 3.5
31  from codecs import register_error, backslashreplace_errors
32 
33  def _new_backslashreplace_errors(exc):
34  if isinstance(exc, UnicodeDecodeError):
35  code = hex(ord(exc.object[exc.start]))
36  return (u'\\' + code[1:], exc.start + 1)
37  else:
38  return backslashreplace_errors(exc)
39 
40  register_error('backslashreplace', _new_backslashreplace_errors)
41  del register_error
42  del backslashreplace_errors
43  del _new_backslashreplace_errors
44 
45 
46 def sanitize_for_xml(data):
47  '''
48  Take a string with invalid ASCII/UTF characters and quote them so that the
49  string can be used in an XML text.
50 
51  >>> sanitize_for_xml('this is \x1b')
52  'this is [NON-XML-CHAR-0x1B]'
53  '''
54  bad_chars = re.compile(
55  u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
56 
57  def quote(match):
58  'helper function'
59  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
60 
61  return bad_chars.sub(quote, data)
62 
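# Illustrative sketch (not part of BaseTest.py): sanitize_for_xml is meant to
# be applied to captured output before it is embedded in an XML report, so
# that control characters and lone surrogates do not break the XML.
print(sanitize_for_xml(u'escape \x1b and surrogate \udcff'))
# -> 'escape [NON-XML-CHAR-0x1B] and surrogate [NON-XML-CHAR-0xDCFF]'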
63 
64 def dumpProcs(name):
65  '''helper to debug GAUDI-1084, dump the list of processes'''
66  from getpass import getuser
67  if 'WORKSPACE' in os.environ:
68  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
69  with open(os.path.join(os.environ['WORKSPACE'], name), 'wb') as f:
70  f.write(p.communicate()[0])
71 
72 
73 def kill_tree(ppid, sig):
74  '''
75  Send a signal to a process and all its child processes (starting from the
76  leaves).
77  '''
78  log = logging.getLogger('kill_tree')
79  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
80  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
81  children = map(int, get_children.communicate()[0].split())
82  for child in children:
83  kill_tree(child, sig)
84  try:
85  log.debug('killing process %d', ppid)
86  os.kill(ppid, sig)
87  except OSError as err:
88  if err.errno != 3: # No such process
89  raise
90  log.debug('no such process %d', ppid)
91 
92 
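# Minimal usage sketch (not part of BaseTest.py), assuming a Unix-like system
# with the procps 'ps' command: terminate a test process and its children,
# escalating to SIGKILL if SIGTERM is ignored, as BaseTest.run() does below.
import signal
import time
from subprocess import Popen

stuck = Popen(['sleep', '600'])          # stand-in for a stuck test process
kill_tree(stuck.pid, signal.SIGTERM)
time.sleep(1)
if stuck.poll() is None:                 # still alive after the grace period
    kill_tree(stuck.pid, signal.SIGKILL)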
93 # -------------------------------------------------------------------------#
94 
95 
96 class BaseTest(object):
97 
98  _common_tmpdir = None
99 
100  def __init__(self):
101  self.program = ''
102  self.args = []
103  self.reference = ''
104  self.error_reference = ''
105  self.options = ''
106  self.stderr = ''
107  self.timeout = 600
108  self.exit_code = None
109  self.environment = None
110  self.unsupported_platforms = []
111  self.signal = None
112  self.workdir = os.curdir
113  self.use_temp_dir = False
114  # Variables not for users
115  self.status = None
116  self.name = ''
117  self.causes = []
118  self.result = Result(self)
119  self.returnedCode = 0
120  self.out = ''
121  self.err = ''
122  self.proc = None
123  self.stack_trace = None
124  self.basedir = os.getcwd()
125 
126  def run(self):
127  logging.debug('running test %s', self.name)
128 
129  if self.options:
130  if re.search(
131  r'from\s+Gaudi.Configuration\s+import\s+\*|'
132  'from\s+Configurables\s+import', self.options):
133  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
134  else:
135  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
136  optionFile.file.write(self.options.encode('utf-8'))
137  optionFile.seek(0)
138  self.args.append(RationalizePath(optionFile.name))
139 
140  # If not specified, setting the environment
141  if self.environment is None:
142  self.environment = os.environ
143  else:
144  self.environment = dict(
145  list(self.environment.items()) + list(os.environ.items()))
146 
147  platform_id = (os.environ.get('BINARY_TAG')
148  or os.environ.get('CMTCONFIG') or platform.platform())
149  # If at least one regex matches we skip the test.
150  skip_test = bool([
151  None for prex in self.unsupported_platforms
152  if re.search(prex, platform_id)
153  ])
154 
155  if not skip_test:
156  # handle working/temporary directory options
157  workdir = self.workdir
158  if self.use_temp_dir:
159  if self._common_tmpdir:
160  workdir = self._common_tmpdir
161  else:
162  workdir = tempfile.mkdtemp()
163 
164  # prepare the command to execute
165  prog = ''
166  if self.program != '':
167  prog = self.program
168  elif "GAUDIEXE" in os.environ:
169  prog = os.environ["GAUDIEXE"]
170  else:
171  prog = "Gaudi.exe"
172 
173  dummy, prog_ext = os.path.splitext(prog)
174  if prog_ext not in [".exe", ".py", ".bat"]:
175  prog += ".exe"
176  prog_ext = ".exe"
177 
178  prog = which(prog) or prog
179 
180  args = list(map(RationalizePath, self.args))
181 
182  if prog_ext == ".py":
183  params = ['python', RationalizePath(prog)] + args
184  else:
185  params = [RationalizePath(prog)] + args
186 
187  validatorRes = Result({
188  'CAUSE': None,
189  'EXCEPTION': None,
190  'RESOURCE': None,
191  'TARGET': None,
192  'TRACEBACK': None,
193  'START_TIME': None,
194  'END_TIME': None,
195  'TIMEOUT_DETAIL': None
196  })
197  self.result = validatorRes
198 
199  # we need to switch directory because the validator expects to run
200  # in the same dir as the program
201  os.chdir(workdir)
202 
203  # launching test in a different thread to handle timeout exception
204  def target():
205  logging.debug('executing %r in %s', params, workdir)
206  self.proc = Popen(
207  params, stdout=PIPE, stderr=PIPE, env=self.environment)
208  logging.debug('(pid: %d)', self.proc.pid)
209  out, err = self.proc.communicate()
210  self.out = out.decode('utf-8', errors='backslashreplace')
211  self.err = err.decode('utf-8', errors='backslashreplace')
212 
213  thread = threading.Thread(target=target)
214  thread.start()
215  # catching timeout
216  thread.join(self.timeout)
217 
218  if thread.is_alive():
219  logging.debug('time out in test %s (pid %d)', self.name,
220  self.proc.pid)
221  # get the stack trace of the stuck process
222  cmd = [
223  'gdb', '--pid',
224  str(self.proc.pid), '--batch',
225  '--eval-command=thread apply all backtrace'
226  ]
227  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
228  self.stack_trace = gdb.communicate()[0].decode(
229  'utf-8', errors='backslashreplace')
230 
231  kill_tree(self.proc.pid, signal.SIGTERM)
232  thread.join(60)
233  if thread.is_alive():
234  kill_tree(self.proc.pid, signal.SIGKILL)
235  self.causes.append('timeout')
236  else:
237  logging.debug('completed test %s', self.name)
238 
239  # Getting the error code
240  logging.debug('returnedCode = %s', self.proc.returncode)
241  self.returnedCode = self.proc.returncode
242 
243  logging.debug('validating test...')
244  self.result, self.causes = self.ValidateOutput(
245  stdout=self.out, stderr=self.err, result=validatorRes)
246 
247  # remove the temporary directory if we created it
248  if self.use_temp_dir and not self._common_tmpdir:
249  shutil.rmtree(workdir, True)
250 
251  os.chdir(self.basedir)
252 
253  # handle application exit code
254  if self.signal is not None:
255  if int(self.returnedCode) != -int(self.signal):
256  self.causes.append('exit code')
257 
258  elif self.exit_code is not None:
259  if int(self.returnedCode) != int(self.exit_code):
260  self.causes.append('exit code')
261 
262  elif self.returnedCode != 0:
263  self.causes.append("exit code")
264 
265  if self.causes:
266  self.status = "failed"
267  else:
268  self.status = "passed"
269 
270  else:
271  self.status = "skipped"
272 
273  logging.debug('%s: %s', self.name, self.status)
274  field_mapping = {
275  'Exit Code': 'returnedCode',
276  'stderr': 'err',
277  'Arguments': 'args',
278  'Environment': 'environment',
279  'Status': 'status',
280  'stdout': 'out',
281  'Program Name': 'program',
282  'Name': 'name',
283  'Validator': 'validator',
284  'Output Reference File': 'reference',
285  'Error Reference File': 'error_reference',
286  'Causes': 'causes',
287  # 'Validator Result': 'result.annotations',
288  'Unsupported Platforms': 'unsupported_platforms',
289  'Stack Trace': 'stack_trace'
290  }
291  resultDict = [(key, getattr(self, attr))
292  for key, attr in field_mapping.items()
293  if getattr(self, attr)]
294  resultDict.append(('Working Directory',
295  RationalizePath(
296  os.path.join(os.getcwd(), self.workdir))))
297  # print(dict(resultDict).keys())
298  resultDict.extend(self.result.annotations.items())
299  # print(self.result.annotations.keys())
300  return dict(resultDict)
301 
302  # -------------------------------------------------#
303  # ----------------Validating tool------------------#
304  # -------------------------------------------------#
305 
306  def ValidateOutput(self, stdout, stderr, result):
307  if not self.stderr:
308  self.validateWithReference(stdout, stderr, result, self.causes)
309  elif stderr.strip() != self.stderr.strip():
310  self.causes.append('standard error')
311  return result, self.causes
312 
313  def findReferenceBlock(self,
314  reference=None,
315  stdout=None,
316  result=None,
317  causes=None,
318  signature_offset=0,
319  signature=None,
320  id=None):
321  """
322  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first one of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
323  """
324 
325  if reference is None:
326  reference = self.reference
327  if stdout is None:
328  stdout = self.out
329  if result is None:
330  result = self.result
331  if causes is None:
332  causes = self.causes
333 
334  reflines = list(
335  filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
336  if not reflines:
337  raise RuntimeError("Empty (or null) reference")
338  # the same on standard output
339  outlines = list(
340  filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
341 
342  res_field = "GaudiTest.RefBlock"
343  if id:
344  res_field += "_%s" % id
345 
346  if signature is None:
347  if signature_offset < 0:
348  signature_offset = len(reference) + signature_offset
349  signature = reflines[signature_offset]
350  # find the reference block in the output file
351  try:
352  pos = outlines.index(signature)
353  outlines = outlines[pos - signature_offset:pos + len(reflines) -
354  signature_offset]
355  if reflines != outlines:
356  msg = "standard output"
357  # I do not want 2 messages in causes if the function is called
358  # twice
359  if not msg in causes:
360  causes.append(msg)
361  result[res_field + ".observed"] = result.Quote(
362  "\n".join(outlines))
363  except ValueError:
364  causes.append("missing signature")
365  result[res_field + ".signature"] = result.Quote(signature)
366  if len(reflines) > 1 or signature != reflines[0]:
367  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
368  return causes
369 
370  def countErrorLines(self,
371  expected={
372  'ERROR': 0,
373  'FATAL': 0
374  },
375  stdout=None,
376  result=None,
377  causes=None):
378  """
379  Count the number of messages with required severity (by default ERROR and FATAL)
380  and check if their numbers match the expected ones (0 by default).
381  The dictionary "expected" can be used to tune the number of errors and fatals
382  allowed, or to limit the number of expected warnings etc.
383  """
384 
385  if stdout is None:
386  stdout = self.out
387  if result is None:
388  result = self.result
389  if causes is None:
390  causes = self.causes
391 
392  # prepare the dictionary to record the extracted lines
393  errors = {}
394  for sev in expected:
395  errors[sev] = []
396 
397  outlines = stdout.splitlines()
398  from math import log10
399  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
400 
401  linecount = 0
402  for l in outlines:
403  linecount += 1
404  words = l.split()
405  if len(words) >= 2 and words[1] in errors:
406  errors[words[1]].append(fmt % (linecount, l.rstrip()))
407 
408  for e in errors:
409  if len(errors[e]) != expected[e]:
410  causes.append('%s(%d)' % (e, len(errors[e])))
411  result["GaudiTest.lines.%s" % e] = result.Quote('\n'.join(
412  errors[e]))
413  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
414  str(expected[e]))
415 
416  return causes
417 
418  def CheckTTreesSummaries(self,
419  stdout=None,
420  result=None,
421  causes=None,
422  trees_dict=None,
423  ignore=r"Basket|.*size|Compression"):
424  """
425  Compare the TTree summaries in stdout with the ones in trees_dict or in
426  the reference file. By default ignore the size, compression and basket
427  fields.
428  The presence of TTree summaries when none is expected is not a failure.
429  """
430  if stdout is None:
431  stdout = self.out
432  if result is None:
433  result = self.result
434  if causes is None:
435  causes = self.causes
436  if trees_dict is None:
437  lreference = self._expandReferenceFileName(self.reference)
438  # call the validator if the file exists
439  if lreference and os.path.isfile(lreference):
440  trees_dict = findTTreeSummaries(open(lreference).read())
441  else:
442  trees_dict = {}
443 
444  from pprint import PrettyPrinter
445  pp = PrettyPrinter()
446  if trees_dict:
447  result["GaudiTest.TTrees.expected"] = result.Quote(
448  pp.pformat(trees_dict))
449  if ignore:
450  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
451 
452  trees = findTTreeSummaries(stdout)
453  failed = cmpTreesDicts(trees_dict, trees, ignore)
454  if failed:
455  causes.append("trees summaries")
456  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees,
457  failed)
458  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
459  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
460 
461  return causes
462 
463  def CheckHistosSummaries(self,
464  stdout=None,
465  result=None,
466  causes=None,
467  dict=None,
468  ignore=None):
469  """
470  Compare the histogram summaries in stdout with the ones in dict or in
471  the reference file.
472  The presence of histogram summaries when none is expected is not a
473  failure.
474  """
475  if stdout is None:
476  stdout = self.out
477  if result is None:
478  result = self.result
479  if causes is None:
480  causes = self.causes
481 
482  if dict is None:
483  lreference = self._expandReferenceFileName(self.reference)
484  # call the validator if the file exists
485  if lreference and os.path.isfile(lreference):
486  dict = findHistosSummaries(open(lreference).read())
487  else:
488  dict = {}
489 
490  from pprint import PrettyPrinter
491  pp = PrettyPrinter()
492  if dict:
493  result["GaudiTest.Histos.expected"] = result.Quote(
494  pp.pformat(dict))
495  if ignore:
496  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
497 
498  histos = findHistosSummaries(stdout)
499  failed = cmpTreesDicts(dict, histos, ignore)
500  if failed:
501  causes.append("histos summaries")
502  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
503  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
504  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
505 
506  return causes
507 
508  def validateWithReference(self,
509  stdout=None,
510  stderr=None,
511  result=None,
512  causes=None,
513  preproc=None):
514  '''
515  Default validation action: compare standard output and error to the
516  reference files.
517  '''
518 
519  if stdout is None:
520  stdout = self.out
521  if stderr is None:
522  stderr = self.err
523  if result is None:
524  result = self.result
525  if causes is None:
526  causes = self.causes
527 
528  # set the default output preprocessor
529  if preproc is None:
530  preproc = normalizeExamples
531  # check standard output
532  lreference = self._expandReferenceFileName(self.reference)
533  # call the validator if the file exists
534  if lreference and os.path.isfile(lreference):
535  causes += ReferenceFileValidator(
536  lreference, "standard output", "Output Diff",
537  preproc=preproc)(stdout, result)
538  elif lreference:
539  causes += ["missing reference file"]
540  # Compare TTree summaries
541  causes = self.CheckTTreesSummaries(stdout, result, causes)
542  causes = self.CheckHistosSummaries(stdout, result, causes)
543  if causes and lreference: # Write a new reference file for stdout
544  try:
545  cnt = 0
546  newrefname = '.'.join([lreference, 'new'])
547  while os.path.exists(newrefname):
548  cnt += 1
549  newrefname = '.'.join([lreference, '~%d~' % cnt, 'new'])
550  newref = open(newrefname, "w")
551  # sanitize newlines
552  for l in stdout.splitlines():
553  newref.write(l.rstrip() + '\n')
554  del newref # flush and close
555  result['New Output Reference File'] = os.path.relpath(
556  newrefname, self.basedir)
557  except IOError:
558  # Ignore IO errors when trying to update reference files
559  # because we may be in a read-only filesystem
560  pass
561 
562  # check standard error
563  lreference = self._expandReferenceFileName(self.error_reference)
564  # call the validator if we have a file to use
565  if lreference:
566  if os.path.isfile(lreference):
567  newcauses = ReferenceFileValidator(
568  lreference,
569  "standard error",
570  "Error Diff",
571  preproc=preproc)(stderr, result)
572  else:
573  newcauses = ["missing error reference file"]
574  causes += newcauses
575  if newcauses and lreference: # Write a new reference file for stderr
576  cnt = 0
577  newrefname = '.'.join([lreference, 'new'])
578  while os.path.exists(newrefname):
579  cnt += 1
580  newrefname = '.'.join([lreference, '~%d~' % cnt, 'new'])
581  newref = open(newrefname, "w")
582  # sanitize newlines
583  for l in stderr.splitlines():
584  newref.write(l.rstrip() + '\n')
585  del newref # flush and close
586  result['New Error Reference File'] = os.path.relpath(
587  newrefname, self.basedir)
588  else:
589  causes += BasicOutputValidator(lreference, "standard error",
590  "ExecTest.expected_stderr")(stderr,
591  result)
592  return causes
593 
594  def _expandReferenceFileName(self, reffile):
595  # if no file is passed, do nothing
596  if not reffile:
597  return ""
598 
599  # function to split an extension into its constituent parts
600  def platformSplit(p):
601  import re
602  delim = re.compile('-' in p and r"[-+]" or r"_")
603  return set(delim.split(p))
604 
605  reference = os.path.normpath(
606  os.path.join(self.basedir, os.path.expandvars(reffile)))
607 
608  # old-style platform-specific reference name
609  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
610  if os.path.isfile(spec_ref):
611  reference = spec_ref
612  else: # look for new-style platform specific reference files:
613  # get all the files whose name start with the reference filename
614  dirname, basename = os.path.split(reference)
615  if not dirname:
616  dirname = '.'
617  head = basename + "."
618  head_len = len(head)
619  platform = platformSplit(GetPlatform(self))
620  if 'do0' in platform:
621  platform.add('dbg')
622  candidates = []
623  for f in os.listdir(dirname):
624  if f.startswith(head):
625  req_plat = platformSplit(f[head_len:])
626  if platform.issuperset(req_plat):
627  candidates.append((len(req_plat), f))
628  if candidates: # take the one with highest matching
629  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
630  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
631  candidates.sort()
632  reference = os.path.join(dirname, candidates[-1][1])
633  return reference
634 
635 
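# Illustrative sketch (not part of BaseTest.py) of how a driver script can use
# BaseTest directly. The attribute names are the ones initialised in __init__
# above; the program 'true' and the log line are made-up examples and assume a
# Unix-like system.
def run_simple_test():
    test = BaseTest()
    test.name = 'example_test'
    test.program = 'true'      # resolved through which(), including '.exe' handling
    test.timeout = 60
    test.exit_code = 0
    summary = test.run()       # the dictionary assembled at the end of run()
    print(summary['Status'], summary.get('Causes', []))

def count_errors_example():
    test = BaseTest()
    test.out = 'ToolSvc  ERROR something went wrong\n'
    print(test.countErrorLines(stdout=test.out))   # ['ERROR(1)']: one ERROR seen, none expected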
636 # ======= GAUDI TOOLS =======
637 
638 import shutil
639 import string
640 import difflib
641 import calendar
642 
643 try:
644  from GaudiKernel import ROOT6WorkAroundEnabled
645 except ImportError:
646 
647  def ROOT6WorkAroundEnabled(id=None):
648  # dummy implementation
649  return False
650 
651 
652 # --------------------------------- TOOLS ---------------------------------#
653 
654 
655 def RationalizePath(p):
656  """
657  Function used to normalize the used path
658  """
659  newPath = os.path.normpath(os.path.expandvars(p))
660  if os.path.exists(newPath):
661  p = os.path.realpath(newPath)
662  return p
663 
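# Quick sketch of RationalizePath (not part of the file); the variable name is
# hypothetical. Environment variables are expanded and the path is normalised;
# if the result exists it is resolved with realpath, otherwise the argument is
# returned untouched.
import os
os.environ['MY_REF_DIR'] = '/tmp'
print(RationalizePath('$MY_REF_DIR/./subdir/..'))    # -> '/tmp' (exists, so resolved)
print(RationalizePath('$MY_REF_DIR/missing.ref'))    # does not exist: returned unchanged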
664 
665 def which(executable):
666  """
667  Locates an executable in the executables path ($PATH) and returns the full
668  path to it. An application is looked for with or without the '.exe' suffix.
669  If the executable cannot be found, None is returned
670  """
671  if os.path.isabs(executable):
672  if not os.path.exists(executable):
673  if executable.endswith('.exe'):
674  if os.path.exists(executable[:-4]):
675  return executable[:-4]
676  else:
677  head, executable = os.path.split(executable)
678  else:
679  return executable
680  for d in os.environ.get("PATH").split(os.pathsep):
681  fullpath = os.path.join(d, executable)
682  if os.path.exists(fullpath):
683  return fullpath
684  if executable.endswith('.exe'):
685  return which(executable[:-4])
686  return None
687 
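# Usage sketch (not part of BaseTest.py): which() honours $PATH and also
# retries without a trailing '.exe', since Gaudi executables keep that suffix
# even on Linux.
print(which('ls'))             # e.g. '/usr/bin/ls'
print(which('ls.exe'))         # same result: falls back to looking for 'ls'
print(which('no-such-prog'))   # None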
688 
689 # -------------------------------------------------------------------------#
690 # ----------------------------- Result Class ------------------------------#
691 # -------------------------------------------------------------------------#
692 import types
693 
694 
695 class Result:
696 
697  PASS = 'PASS'
698  FAIL = 'FAIL'
699  ERROR = 'ERROR'
700  UNTESTED = 'UNTESTED'
701 
702  EXCEPTION = ""
703  RESOURCE = ""
704  TARGET = ""
705  TRACEBACK = ""
706  START_TIME = ""
707  END_TIME = ""
708  TIMEOUT_DETAIL = ""
709 
710  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
711  self.annotations = annotations.copy()
712 
713  def __getitem__(self, key):
714  assert isinstance(key, six.string_types)
715  return self.annotations[key]
716 
717  def __setitem__(self, key, value):
718  assert isinstance(key, six.string_types)
719  assert isinstance(
720  value, six.string_types), '{!r} is not a string'.format(value)
721  self.annotations[key] = value
722 
723  def Quote(self, string):
724  return string
725 
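# Small sketch (not part of BaseTest.py): Result behaves like a dictionary of
# string annotations that the validators fill in; Quote() is currently a plain
# pass-through hook.
res = Result()
res['GaudiTest.example'] = res.Quote('some captured output')
print(res['GaudiTest.example'])    # 'some captured output'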
726 
727 # -------------------------------------------------------------------------#
728 # --------------------------- Validator Classes ---------------------------#
729 # -------------------------------------------------------------------------#
730 
731 # Basic implementation of an option validator for Gaudi test. This
732 # implementation is based on the standard (LCG) validation functions used
733 # in QMTest.
734 
735 
736 class BasicOutputValidator:
737  def __init__(self, ref, cause, result_key):
738  self.ref = ref
739  self.cause = cause
740  self.result_key = result_key
741 
742  def __call__(self, out, result):
743  """Validate the output of the program.
744  'stdout' -- A string containing the data written to the standard output
745  stream.
746  'stderr' -- A string containing the data written to the standard error
747  stream.
748  'result' -- A 'Result' object. It may be used to annotate
749  the outcome according to the content of stderr.
750  returns -- A list of strings giving causes of failure."""
751 
752  causes = []
753  # Check the output
754  if not self.__CompareText(out, self.ref):
755  causes.append(self.cause)
756  result[self.result_key] = result.Quote(self.ref)
757 
758  return causes
759 
760  def __CompareText(self, s1, s2):
761  """Compare 's1' and 's2', ignoring line endings.
762  's1' -- A string.
763  's2' -- A string.
764  returns -- True if 's1' and 's2' are the same, ignoring
765  differences in line endings."""
766  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
767  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
768  # can fix them
769  to_ignore = re.compile(
770  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*'
771  )
772 
773  def keep_line(l):
774  return not to_ignore.match(l)
775 
776  return list(filter(keep_line, s1.splitlines())) == list(
777  filter(keep_line, s2.splitlines()))
778  else:
779  return s1.splitlines() == s2.splitlines()
780 
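# Illustrative sketch (not part of BaseTest.py): BasicOutputValidator compares
# the captured text with an in-memory reference, ignoring line endings; the
# returned list contains the failure cause when the texts differ.
validator = BasicOutputValidator('expected output\n', 'standard error',
                                 'ExecTest.expected_stderr')
res = Result()
print(validator('expected output\r\n', res))   # [] -> texts match
print(validator('something else\n', res))      # ['standard error']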
781 
782 # ------------------------ Preprocessor elements ------------------------#
783 class FilePreprocessor:
784  """ Base class for a callable that takes a file and returns a modified
785  version of it."""
786 
787  def __processLine__(self, line):
788  return line
789 
790  def __processFile__(self, lines):
791  output = []
792  for l in lines:
793  l = self.__processLine__(l)
794  if l:
795  output.append(l)
796  return output
797 
798  def __call__(self, input):
799  if not isinstance(input, six.string_types):
800  lines = input
801  mergeback = False
802  else:
803  lines = input.splitlines()
804  mergeback = True
805  output = self.__processFile__(lines)
806  if mergeback:
807  output = '\n'.join(output)
808  return output
809 
810  def __add__(self, rhs):
811  return FilePreprocessorSequence([self, rhs])
812 
813 
814 class FilePreprocessorSequence(FilePreprocessor):
815  def __init__(self, members=[]):
816  self.members = members
817 
818  def __add__(self, rhs):
819  return FilePreprocessorSequence(self.members + [rhs])
820 
821  def __call__(self, input):
822  output = input
823  for pp in self.members:
824  output = pp(output)
825  return output
826 
827 
828 class LineSkipper(FilePreprocessor):
829  def __init__(self, strings=[], regexps=[]):
830  import re
831  self.strings = strings
832  self.regexps = list(map(re.compile, regexps))
833 
834  def __processLine__(self, line):
835  for s in self.strings:
836  if line.find(s) >= 0:
837  return None
838  for r in self.regexps:
839  if r.search(line):
840  return None
841  return line
842 
843 
844 class BlockSkipper(FilePreprocessor):
845  def __init__(self, start, end):
846  self.start = start
847  self.end = end
848  self._skipping = False
849 
850  def __processLine__(self, line):
851  if self.start in line:
852  self._skipping = True
853  return None
854  elif self.end in line:
855  self._skipping = False
856  elif self._skipping:
857  return None
858  return line
859 
860 
861 class RegexpReplacer(FilePreprocessor):
862  def __init__(self, orig, repl="", when=None):
863  if when:
864  when = re.compile(when)
865  self._operations = [(when, re.compile(orig), repl)]
866 
867  def __add__(self, rhs):
868  if isinstance(rhs, RegexpReplacer):
869  res = RegexpReplacer("", "", None)
870  res._operations = self._operations + rhs._operations
871  else:
872  res = FilePreprocessor.__add__(self, rhs)
873  return res
874 
875  def __processLine__(self, line):
876  for w, o, r in self._operations:
877  if w is None or w.search(line):
878  line = o.sub(r, line)
879  return line
880 
881 
882 # Common preprocessors
883 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
884 normalizeDate = RegexpReplacer(
885  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
886  "00:00:00 1970-01-01")
887 normalizeEOL = FilePreprocessor()
888 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
889 
890 skipEmptyLines = FilePreprocessor()
891 # FIXME: that's ugly
892 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
893 
894 # Special preprocessor sorting the list of strings (whitespace separated)
895 # that follow a signature on a single line
896 
897 
898 class LineSorter(FilePreprocessor):
899  def __init__(self, signature):
900  self.signature = signature
901  self.siglen = len(signature)
902 
903  def __processLine__(self, line):
904  pos = line.find(self.signature)
905  if pos >= 0:
906  lst = line[(pos + self.siglen):].split()
907  line = line[:(pos + self.siglen)]
908  lst.sort()
909  line += " ".join(lst)
910  return line
911 
912 
913 class SortGroupOfLines(FilePreprocessor):
914  '''
915  Sort group of lines matching a regular expression
916  '''
917 
918  def __init__(self, exp):
919  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
920 
921  def __processFile__(self, lines):
922  match = self.exp.match
923  output = []
924  group = []
925  for l in lines:
926  if match(l):
927  group.append(l)
928  else:
929  if group:
930  group.sort()
931  output.extend(group)
932  group = []
933  output.append(l)
934  return output
935 
936 
937 # Preprocessors for GaudiExamples
938 normalizeExamples = maskPointers + normalizeDate
939 for w, o, r in [
940  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
941  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
942  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
943  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
944  ("^JobOptionsSvc.*options successfully read in from",
945  r"read in from .*[/\\]([^/\\]*)$",
946  r"file \1"), # normalize path to options
947  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
948  (None,
949  r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
950  "00000000-0000-0000-0000-000000000000"),
951  # Absorb a change in ServiceLocatorHelper
952  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
953  "ServiceLocatorHelper::service"),
954  # Remove the leading 0 in Windows' exponential format
955  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
956  # Output line changed in Gaudi v24
957  (None, r'Service reference count check:',
958  r'Looping over all active services...'),
959  # Ignore count of declared properties (anyway they are all printed)
960  (None,
961  r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
962  r"\1NN"),
963  ('ApplicationMgr', r'(declareMultiSvcType|addMultiSvc): ', ''),
964  ("Property ['Name': Value]", r"( = '[^']+':)'(.*)'", r'\1\2'),
965  ('DataObjectHandleBase', r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
966 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
967  normalizeExamples += RegexpReplacer(o, r, w)
968 
969 lineSkipper = LineSkipper(
970  [
971  "//GP:",
972  "JobOptionsSvc INFO # ",
973  "JobOptionsSvc WARNING # ",
974  "Time User",
975  "Welcome to",
976  "This machine has a speed",
977  "TIME:",
978  "running on",
979  "ToolSvc.Sequenc... INFO",
980  "DataListenerSvc INFO XML written to file:",
981  "[INFO]",
982  "[WARNING]",
983  "DEBUG No writable file catalog found which contains FID:",
984  "DEBUG Service base class initialized successfully",
985  # changed between v20 and v21
986  "DEBUG Incident timing:",
987  # introduced with patch #3487
988  # changed the level of the message from INFO to
989  # DEBUG
990  "INFO 'CnvServices':[",
991  # message removed because could be printed in constructor
992  "DEBUG 'CnvServices':[",
993  # The signal handler complains about SIGXCPU not
994  # defined on some platforms
995  'SIGXCPU',
996  # Message demoted to DEBUG in gaudi/Gaudi!992
997  'Histograms saving not required.',
998  ],
999  regexps=[
1000  r"^JobOptionsSvc INFO *$",
1001  r"^# ", # Ignore python comments
1002  # skip the message reporting the version of the root file
1003  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1004  r"File '.*.xml' does not exist",
1005  r"INFO Refer to dataset .* by its file ID:",
1006  r"INFO Referring to dataset .* by its file ID:",
1007  r"INFO Disconnect from dataset",
1008  r"INFO Disconnected from dataset",
1009  r"INFO Disconnected data IO:",
1010  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1011  # I want to ignore the header of the unchecked StatusCode report
1012  r"^StatusCodeSvc.*listing all unchecked return codes:",
1013  r"^StatusCodeSvc\s*INFO\s*$",
1014  r"Num\s*\|\s*Function\s*\|\s*Source Library",
1015  r"^[-+]*\s*$",
1016  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
1017  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1018  # Hide unchecked StatusCodes from dictionaries
1019  r"^ +[0-9]+ \|.*ROOT",
1020  r"^ +[0-9]+ \|.*\|.*Dict",
1021  # Hide success StatusCodeSvc message
1022  r"StatusCodeSvc.*all StatusCode instances where checked",
1023  # Hide EventLoopMgr total timing report
1024  r"EventLoopMgr.*---> Loop Finished",
1025  r"HiveSlimEventLo.*---> Loop Finished",
1026  # Remove ROOT TTree summary table, which changes from one version to the
1027  # other
1028  r"^\*.*\*$",
1029  # Remove Histos Summaries
1030  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1031  r"^ \|",
1032  r"^ ID=",
1033  # Ignore added/removed properties
1034  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1035  r"Property(.*)'Audit(Begin|End)Run':",
1036  # these were missing in tools
1037  r"Property(.*)'AuditRe(start|initialize)':",
1038  r"Property(.*)'IsIOBound':",
1039  # removed with gaudi/Gaudi!273
1040  r"Property(.*)'ErrorCount(er)?':",
1041  # added with gaudi/Gaudi!306
1042  r"Property(.*)'Sequential':",
1043  # added with gaudi/Gaudi!314
1044  r"Property(.*)'FilterCircularDependencies':",
1045  # removed with gaudi/Gaudi!316
1046  r"Property(.*)'IsClonable':",
1047  # ignore uninteresting/obsolete messages
1048  r"Property update for OutputLevel : new value =",
1049  r"EventLoopMgr\s*DEBUG Creating OutputStream",
1050  ])
1051 
1052 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
1053  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
1054  # fix them
1055  lineSkipper += LineSkipper(regexps=[
1056  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
1057  ])
1058 
1059 normalizeExamples = (
1060  lineSkipper + normalizeExamples + skipEmptyLines + normalizeEOL +
1061  LineSorter("Services to release : ") +
1062  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
1063 
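# Illustrative sketch (not part of BaseTest.py): the preprocessor chain can be
# called on a string or on a list of lines. The log lines below are made up;
# the 'ToolSvc.Sequenc... INFO' line is dropped by lineSkipper and the pointer
# value is masked by maskPointers.
sample = ('ApplicationMgr     INFO Application Manager Started\n'
          'ToolSvc.Sequenc... INFO this line is dropped\n'
          'MyAlg             DEBUG object at 0x7f3a12bd5e10\n')
print(normalizeExamples(sample))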
1064 # --------------------- Validation functions/classes ---------------------#
1065 
1066 
1067 class ReferenceFileValidator:
1068  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1069  self.reffile = os.path.expandvars(reffile)
1070  self.cause = cause
1071  self.result_key = result_key
1072  self.preproc = preproc
1073 
1074  def __call__(self, stdout, result):
1075  causes = []
1076  if os.path.isfile(self.reffile):
1077  orig = open(self.reffile).readlines()
1078  if self.preproc:
1079  orig = self.preproc(orig)
1080  result[self.result_key + '.preproc.orig'] = \
1081  result.Quote('\n'.join(map(str.strip, orig)))
1082  else:
1083  orig = []
1084  new = stdout.splitlines()
1085  if self.preproc:
1086  new = self.preproc(new)
1087 
1088  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
1089  filterdiffs = list(
1090  map(lambda x: x.strip(), filter(lambda x: x[0] != " ", diffs)))
1091  if filterdiffs:
1092  result[self.result_key] = result.Quote("\n".join(filterdiffs))
1093  result[self.result_key] += result.Quote("""
1094  Legend:
1095  -) reference file
1096  +) standard output of the test""")
1097  result[self.result_key + '.preproc.new'] = \
1098  result.Quote('\n'.join(map(str.strip, new)))
1099  causes.append(self.cause)
1100  return causes
1101 
1102 
1103 def findTTreeSummaries(stdout):
1104  """
1105  Scan stdout to find ROOT TTree summaries and digest them.
1106  """
1107  stars = re.compile(r"^\*+$")
1108  outlines = stdout.splitlines()
1109  nlines = len(outlines)
1110  trees = {}
1111 
1112  i = 0
1113  while i < nlines: # loop over the output
1114  # look for
1115  while i < nlines and not stars.match(outlines[i]):
1116  i += 1
1117  if i < nlines:
1118  tree, i = _parseTTreeSummary(outlines, i)
1119  if tree:
1120  trees[tree["Name"]] = tree
1121 
1122  return trees
1123 
1124 
1125 def cmpTreesDicts(reference, to_check, ignore=None):
1126  """
1127  Check that all the keys in reference are in to_check too, with the same value.
1128  If the value is a dict, the function is called recursively. to_check can
1129  contain more keys than reference; the extra keys will not be tested.
1130  The function returns at the first difference found.
1131  """
1132  fail_keys = []
1133  # filter the keys in the reference dictionary
1134  if ignore:
1135  ignore_re = re.compile(ignore)
1136  keys = [key for key in reference if not ignore_re.match(key)]
1137  else:
1138  keys = reference.keys()
1139  # loop over the keys (not ignored) in the reference dictionary
1140  for k in keys:
1141  if k in to_check: # the key must be in the dictionary to_check
1142  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1143  # if both reference and to_check values are dictionaries,
1144  # recurse
1145  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k],
1146  ignore)
1147  else:
1148  # compare the two values
1149  failed = to_check[k] != reference[k]
1150  else: # handle missing keys in the dictionary to check (i.e. failure)
1151  to_check[k] = None
1152  failed = True
1153  if failed:
1154  fail_keys.insert(0, k)
1155  break # exit from the loop at the first failure
1156  return fail_keys # return the list of keys bringing to the different values
1157 
1158 
1159 def getCmpFailingValues(reference, to_check, fail_path):
1160  c = to_check
1161  r = reference
1162  for k in fail_path:
1163  c = c.get(k, None)
1164  r = r.get(k, None)
1165  if c is None or r is None:
1166  break # one of the dictionaries is not deep enough
1167  return (fail_path, r, c)
1168 
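# Worked example (not part of BaseTest.py): cmpTreesDicts walks the reference
# dictionary recursively and returns the key path of the first mismatch, and
# getCmpFailingValues resolves that path to the two differing values. The
# dictionaries below are made-up TTree digests.
ref = {'MyTree': {'Entries': 100, 'Branches': {'px': {'Entries': 100}}}}
new = {'MyTree': {'Entries': 100, 'Branches': {'px': {'Entries': 90}}}}
path = cmpTreesDicts(ref, new)
print(path)                                  # ['MyTree', 'Branches', 'px', 'Entries']
print(getCmpFailingValues(ref, new, path))   # (['MyTree', 'Branches', 'px', 'Entries'], 100, 90)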
1169 
1170 # signature of the print-out of the histograms
1171 h_count_re = re.compile(
1172  r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1173 
1174 
1175 def _parseTTreeSummary(lines, pos):
1176  """
1177  Parse the TTree summary table in lines, starting from pos.
1178  Returns a tuple with the dictionary of the digested information and the
1179  position of the first line after the summary.
1180  """
1181  result = {}
1182  i = pos + 1 # first line is a sequence of '*'
1183  count = len(lines)
1184 
1185  def splitcols(l):
1186  return [f.strip() for f in l.strip("*\n").split(':', 2)]
1187 
1188  def parseblock(ll):
1189  r = {}
1190  cols = splitcols(ll[0])
1191  r["Name"], r["Title"] = cols[1:]
1192 
1193  cols = splitcols(ll[1])
1194  r["Entries"] = int(cols[1])
1195 
1196  sizes = cols[2].split()
1197  r["Total size"] = int(sizes[2])
1198  if sizes[-1] == "memory":
1199  r["File size"] = 0
1200  else:
1201  r["File size"] = int(sizes[-1])
1202 
1203  cols = splitcols(ll[2])
1204  sizes = cols[2].split()
1205  if cols[0] == "Baskets":
1206  r["Baskets"] = int(cols[1])
1207  r["Basket size"] = int(sizes[2])
1208  r["Compression"] = float(sizes[-1])
1209  return r
1210 
1211  if i < (count - 3) and lines[i].startswith("*Tree"):
1212  result = parseblock(lines[i:i + 3])
1213  result["Branches"] = {}
1214  i += 4
1215  while i < (count - 3) and lines[i].startswith("*Br"):
1216  if i < (count - 2) and lines[i].startswith("*Branch "):
1217  # skip branch header
1218  i += 3
1219  continue
1220  branch = parseblock(lines[i:i + 3])
1221  result["Branches"][branch["Name"]] = branch
1222  i += 4
1223 
1224  return (result, i)
1225 
1226 
1227 def parseHistosSummary(lines, pos):
1228  """
1229  Extract the histogram info from the lines starting at pos.
1230  Returns the summaries dictionary and the position of the first line after the summary block.
1231  """
1232  global h_count_re
1233  h_table_head = re.compile(
1234  r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1235  )
1236  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1237 
1238  nlines = len(lines)
1239 
1240  # decode header
1241  m = h_count_re.search(lines[pos])
1242  name = m.group(1).strip()
1243  total = int(m.group(2))
1244  header = {}
1245  for k, v in [x.split("=") for x in m.group(3).split()]:
1246  header[k] = int(v)
1247  pos += 1
1248  header["Total"] = total
1249 
1250  summ = {}
1251  while pos < nlines:
1252  m = h_table_head.search(lines[pos])
1253  if m:
1254  t, d = m.groups(1) # type and directory
1255  t = t.replace(" profile", "Prof")
1256  pos += 1
1257  if pos < nlines:
1258  l = lines[pos]
1259  else:
1260  l = ""
1261  cont = {}
1262  if l.startswith(" | ID"):
1263  # table format
1264  titles = [x.strip() for x in l.split("|")][1:]
1265  pos += 1
1266  while pos < nlines and lines[pos].startswith(" |"):
1267  l = lines[pos]
1268  values = [x.strip() for x in l.split("|")][1:]
1269  hcont = {}
1270  for i in range(len(titles)):
1271  hcont[titles[i]] = values[i]
1272  cont[hcont["ID"]] = hcont
1273  pos += 1
1274  elif l.startswith(" ID="):
1275  while pos < nlines and lines[pos].startswith(" ID="):
1276  values = [
1277  x.strip()
1278  for x in h_short_summ.search(lines[pos]).groups()
1279  ]
1280  cont[values[0]] = values
1281  pos += 1
1282  else: # not interpreted
1283  raise RuntimeError(
1284  "Cannot understand line %d: '%s'" % (pos, l))
1285  if not d in summ:
1286  summ[d] = {}
1287  summ[d][t] = cont
1288  summ[d]["header"] = header
1289  else:
1290  break
1291  if not summ:
1292  # If the full table is not present, we use only the header
1293  summ[name] = {"header": header}
1294  return summ, pos
1295 
1296 
1297 def findHistosSummaries(stdout):
1298  """
1299  Scan stdout to find histogram summaries and digest them.
1300  """
1301  outlines = stdout.splitlines()
1302  nlines = len(outlines) - 1
1303  summaries = {}
1304  global h_count_re
1305 
1306  pos = 0
1307  while pos < nlines:
1308  summ = {}
1309  # find first line of block:
1310  match = h_count_re.search(outlines[pos])
1311  while pos < nlines and not match:
1312  pos += 1
1313  match = h_count_re.search(outlines[pos])
1314  if match:
1315  summ, pos = parseHistosSummary(outlines, pos)
1316  summaries.update(summ)
1317  return summaries
1318 
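# Illustrative sketch (not part of BaseTest.py): findHistosSummaries looks for
# the 'Booked N Histogram(s)' lines matched by h_count_re above. The log
# fragment approximates that format and the component name is made up.
log = ('MyHistoAlg          SUCCESS Booked 2 Histogram(s) : 1D=2\n'
       'ApplicationMgr       INFO Application Manager Stopped successfully\n')
print(findHistosSummaries(log))   # {'MyHistoAlg': {'header': {'1D': 2, 'Total': 2}}}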
1319 
1320 def PlatformIsNotSupported(self, context, result):
1321  platform = GetPlatform(self)
1322  unsupported = [
1323  re.compile(x) for x in [str(y).strip() for y in unsupported_platforms]
1324  if x
1325  ]
1326  for p_re in unsupported:
1327  if p_re.search(platform):
1328  result.SetOutcome(result.UNTESTED)
1329  result[result.CAUSE] = 'Platform not supported.'
1330  return True
1331  return False
1332 
1333 
1334 def GetPlatform(self):
1335  """
1336  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1337  """
1338  arch = "None"
1339  # check architecture name
1340  if "BINARY_TAG" in os.environ:
1341  arch = os.environ["BINARY_TAG"]
1342  elif "CMTCONFIG" in os.environ:
1343  arch = os.environ["CMTCONFIG"]
1344  elif "SCRAM_ARCH" in os.environ:
1345  arch = os.environ["SCRAM_ARCH"]
1346  return arch
1347 
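# Trivial usage sketch (not part of BaseTest.py); the tag below is an example.
import os
os.environ['BINARY_TAG'] = 'x86_64-centos7-gcc9-opt'
print(GetPlatform(None))    # 'x86_64-centos7-gcc9-opt' (the 'self' argument is unused)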
1348 
1349 def isWinPlatform(self):
1350  """
1351  Return True if the current platform is Windows.
1352 
1353  This function was needed because of the change in the CMTCONFIG format,
1354  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1355  """
1356  platform = GetPlatform(self)
1357  return "winxp" in platform or platform.startswith("win")