The Gaudi Framework  v30r0 (c919700c)
BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 
17 def sanitize_for_xml(data):
18  '''
19  Take a string containing characters that are not allowed in XML text and
20  quote them so that the string can be safely embedded in an XML document.
21 
22  >>> sanitize_for_xml('this is \x1b')
23  'this is [NON-XML-CHAR-0x1B]'
24  '''
25  bad_chars = re.compile(
26  u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
27 
28  def quote(match):
29  'helper function'
30  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
31  return bad_chars.sub(quote, data)
32 
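Illustrative sketch (not part of BaseTest.py): using sanitize_for_xml to make captured output safe for an XML report; the captured string is made up.

captured = 'progress: 42%\x1b[K'            # hypothetical captured stdout with a terminal escape
print(sanitize_for_xml(captured))           # prints: progress: 42%[NON-XML-CHAR-0x1B][K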
33 
34 def dumpProcs(name):
35  '''helper to debug GAUDI-1084, dump the list of processes'''
36  from getpass import getuser
37  if 'WORKSPACE' in os.environ:
38  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
39  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
40  f.write(p.communicate()[0])
41 
42 
43 def kill_tree(ppid, sig):
44  '''
45  Send a signal to a process and all its child processes (starting from the
46  leaves).
47  '''
48  log = logging.getLogger('kill_tree')
49  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
50  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
51  children = map(int, get_children.communicate()[0].split())
52  for child in children:
53  kill_tree(child, sig)
54  try:
55  log.debug('killing process %d', ppid)
56  os.kill(ppid, sig)
57  except OSError, err:
58  if err.errno != 3: # No such process
59  raise
60  log.debug('no such process %d', ppid)
61 
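Illustrative sketch (not part of the file) of how kill_tree is used, mirroring the timeout handling in BaseTest.run further down; the sleep command is just a stand-in for a stuck test process.

import signal
from subprocess import Popen

child = Popen(['sleep', '120'])             # hypothetical long-running process
kill_tree(child.pid, signal.SIGTERM)        # signal the whole process tree, leaves first
child.wait()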
62 # -------------------------------------------------------------------------#
63 
64 
65 class BaseTest(object):
66 
67  _common_tmpdir = None
68 
69  def __init__(self):
70  self.program = ''
71  self.args = []
72  self.reference = ''
73  self.error_reference = ''
74  self.options = ''
75  self.stderr = ''
76  self.timeout = 600
77  self.exit_code = None
78  self.environment = None
79  self.unsupported_platforms = []
80  self.signal = None
81  self.workdir = os.curdir
82  self.use_temp_dir = False
83  # Variables not for users
84  self.status = None
85  self.name = ''
86  self.causes = []
87  self.result = Result(self)
88  self.returnedCode = 0
89  self.out = ''
90  self.err = ''
91  self.proc = None
92  self.stack_trace = None
93  self.basedir = os.getcwd()
94 
95  def run(self):
96  logging.debug('running test %s', self.name)
97 
98  if self.options:
99  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
100  'from\s+Configurables\s+import', self.options):
101  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
102  else:
103  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
104  optionFile.file.write(self.options)
105  optionFile.seek(0)
106  self.args.append(RationalizePath(optionFile.name))
107 
108  # If not specified, use the current environment; otherwise merge with it
109  if self.environment is None:
110  self.environment = os.environ
111  else:
112  self.environment = dict(
113  self.environment.items() + os.environ.items())
114 
115  platform_id = (os.environ.get('BINARY_TAG') or
116  os.environ.get('CMTCONFIG') or
117  platform.platform())
118  # If at least one regex matches we skip the test.
119  skip_test = bool([None
120  for prex in self.unsupported_platforms
121  if re.search(prex, platform_id)])
122 
123  if not skip_test:
124  # handle working/temporary directory options
125  workdir = self.workdir
126  if self.use_temp_dir:
127  if self._common_tmpdir:
128  workdir = self._common_tmpdir
129  else:
130  workdir = tempfile.mkdtemp()
131 
132  # prepare the command to execute
133  prog = ''
134  if self.program != '':
135  prog = self.program
136  elif "GAUDIEXE" in os.environ:
137  prog = os.environ["GAUDIEXE"]
138  else:
139  prog = "Gaudi.exe"
140 
141  dummy, prog_ext = os.path.splitext(prog)
142  if prog_ext not in [".exe", ".py", ".bat"]:
143  prog += ".exe"
144  prog_ext = ".exe"
145 
146  prog = which(prog) or prog
147 
148  args = map(RationalizePath, self.args)
149 
150  if prog_ext == ".py":
151  params = ['python', RationalizePath(prog)] + args
152  else:
153  params = [RationalizePath(prog)] + args
154 
155  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
156  'RESOURCE': None, 'TARGET': None,
157  'TRACEBACK': None, 'START_TIME': None,
158  'END_TIME': None, 'TIMEOUT_DETAIL': None})
159  self.result = validatorRes
160 
161  # we need to switch directory because the validator expects to run
162  # in the same dir as the program
163  os.chdir(workdir)
164 
165  # launching test in a different thread to handle timeout exception
166  def target():
167  logging.debug('executing %r in %s',
168  params, workdir)
169  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
170  env=self.environment)
171  logging.debug('(pid: %d)', self.proc.pid)
172  self.out, self.err = self.proc.communicate()
173 
174  thread = threading.Thread(target=target)
175  thread.start()
176  # catching timeout
177  thread.join(self.timeout)
178 
179  if thread.is_alive():
180  logging.debug('time out in test %s (pid %d)',
181  self.name, self.proc.pid)
182  # get the stack trace of the stuck process
183  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
184  '--eval-command=thread apply all backtrace']
185  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
186  self.stack_trace = gdb.communicate()[0]
187 
188  kill_tree(self.proc.pid, signal.SIGTERM)
189  thread.join(60)
190  if thread.is_alive():
191  kill_tree(self.proc.pid, signal.SIGKILL)
192  self.causes.append('timeout')
193  else:
194  logging.debug('completed test %s', self.name)
195 
196  # Getting the error code
197  logging.debug('returnedCode = %s', self.proc.returncode)
198  self.returnedCode = self.proc.returncode
199 
200  logging.debug('validating test...')
201  self.result, self.causes = self.ValidateOutput(stdout=self.out,
202  stderr=self.err,
203  result=validatorRes)
204 
205  # remove the temporary directory if we created it
206  if self.use_temp_dir and not self._common_tmpdir:
207  shutil.rmtree(workdir, True)
208 
209  os.chdir(self.basedir)
210 
211  # handle application exit code
212  if self.signal is not None:
213  if int(self.returnedCode) != -int(self.signal):
214  self.causes.append('exit code')
215 
216  elif self.exit_code is not None:
217  if int(self.returnedCode) != int(self.exit_code):
218  self.causes.append('exit code')
219 
220  elif self.returnedCode != 0:
221  self.causes.append("exit code")
222 
223  if self.causes:
224  self.status = "failed"
225  else:
226  self.status = "passed"
227 
228  else:
229  self.status = "skipped"
230 
231  logging.debug('%s: %s', self.name, self.status)
232  field_mapping = {'Exit Code': 'returnedCode',
233  'stderr': 'err',
234  'Arguments': 'args',
235  'Environment': 'environment',
236  'Status': 'status',
237  'stdout': 'out',
238  'Program Name': 'program',
239  'Name': 'name',
240  'Validator': 'validator',
241  'Output Reference File': 'reference',
242  'Error Reference File': 'error_reference',
243  'Causes': 'causes',
244  #'Validator Result': 'result.annotations',
245  'Unsupported Platforms': 'unsupported_platforms',
246  'Stack Trace': 'stack_trace'}
247  resultDict = [(key, getattr(self, attr))
248  for key, attr in field_mapping.iteritems()
249  if getattr(self, attr)]
250  resultDict.append(('Working Directory',
251  RationalizePath(os.path.join(os.getcwd(),
252  self.workdir))))
253  # print dict(resultDict).keys()
254  resultDict.extend(self.result.annotations.iteritems())
255  # print self.result.annotations.keys()
256  return dict(resultDict)
257 
258  # -------------------------------------------------#
259  # ----------------Validating tool------------------#
260  # -------------------------------------------------#
261 
262  def ValidateOutput(self, stdout, stderr, result):
263  if not self.stderr:
264  self.validateWithReference(stdout, stderr, result, self.causes)
265  elif stderr.strip() != self.stderr.strip():
266  self.causes.append('standard error')
267  return result, self.causes
268 
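A hedged sketch of how a concrete test could customise validation by overriding ValidateOutput; the class name and the expected message are illustrative only.

class MyTest(BaseTest):                     # hypothetical test class
    def ValidateOutput(self, stdout, stderr, result):
        # fail unless the application finalised cleanly
        if 'Application Manager Terminated successfully' not in stdout:
            self.causes.append('missing finalization message')
        return result, self.causes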
269  def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None):
270  """
271  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line), otherwise it is interpreted as the number of lines before the first one of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
272  """
273 
274  if reference is None:
275  reference = self.reference
276  if stdout is None:
277  stdout = self.out
278  if result is None:
279  result = self.result
280  if causes is None:
281  causes = self.causes
282 
283  reflines = filter(
284  None, map(lambda s: s.rstrip(), reference.splitlines()))
285  if not reflines:
286  raise RuntimeError("Empty (or null) reference")
287  # the same on standard output
288  outlines = filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))
289 
290  res_field = "GaudiTest.RefBlock"
291  if id:
292  res_field += "_%s" % id
293 
294  if signature is None:
295  if signature_offset < 0:
296  signature_offset = len(reference) + signature_offset
297  signature = reflines[signature_offset]
298  # find the reference block in the output file
299  try:
300  pos = outlines.index(signature)
301  outlines = outlines[pos - signature_offset:pos +
302  len(reflines) - signature_offset]
303  if reflines != outlines:
304  msg = "standard output"
305  # I do not want 2 messages in causes if the function is called
306  # twice
307  if not msg in causes:
308  causes.append(msg)
309  result[res_field +
310  ".observed"] = result.Quote("\n".join(outlines))
311  except ValueError:
312  causes.append("missing signature")
313  result[res_field + ".signature"] = result.Quote(signature)
314  if len(reflines) > 1 or signature != reflines[0]:
315  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
316  return causes
317 
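Illustrative direct call (the output line and id are invented): look for a one-line reference block in the captured output, using that line itself as the signature.

t = BaseTest()
t.out = 'ApplicationMgr INFO Application Manager Finalized successfully\n'
t.findReferenceBlock(
    reference='ApplicationMgr INFO Application Manager Finalized successfully',
    id='finalize')
print(t.causes)                             # empty: the block was found unchanged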
318  def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None, result=None, causes=None):
319  """
320  Count the number of messages with required severity (by default ERROR and FATAL)
321  and check if their numbers match the expected ones (0 by default).
322  The dictionary "expected" can be used to tune the number of errors and fatals
323  allowed, or to limit the number of expected warnings etc.
324  """
325 
326  if stdout is None:
327  stdout = self.out
328  if result is None:
329  result = self.result
330  if causes is None:
331  causes = self.causes
332 
333  # prepare the dictionary to record the extracted lines
334  errors = {}
335  for sev in expected:
336  errors[sev] = []
337 
338  outlines = stdout.splitlines()
339  from math import log10
340  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
341 
342  linecount = 0
343  for l in outlines:
344  linecount += 1
345  words = l.split()
346  if len(words) >= 2 and words[1] in errors:
347  errors[words[1]].append(fmt % (linecount, l.rstrip()))
348 
349  for e in errors:
350  if len(errors[e]) != expected[e]:
351  causes.append('%s(%d)' % (e, len(errors[e])))
352  result["GaudiTest.lines.%s" %
353  e] = result.Quote('\n'.join(errors[e]))
354  result["GaudiTest.lines.%s.expected#" %
355  e] = result.Quote(str(expected[e]))
356 
357  return causes
358 
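Illustrative call with an invented message: require exactly one ERROR line and no FATAL lines in the captured output.

t = BaseTest()
t.out = 'MyAlg         ERROR something recoverable went wrong\n'
t.countErrorLines(expected={'ERROR': 1, 'FATAL': 0})
print(t.causes)                             # empty: the single ERROR was expected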
359  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
360  trees_dict=None,
361  ignore=r"Basket|.*size|Compression"):
362  """
363  Compare the TTree summaries in stdout with the ones in trees_dict or in
364  the reference file. By default ignore the size, compression and basket
365  fields.
366  The presence of TTree summaries when none is expected is not a failure.
367  """
368  if stdout is None:
369  stdout = self.out
370  if result is None:
371  result = self.result
372  if causes is None:
373  causes = self.causes
374  if trees_dict is None:
375  lreference = self._expandReferenceFileName(self.reference)
376  # call the validator if the file exists
377  if lreference and os.path.isfile(lreference):
378  trees_dict = findTTreeSummaries(open(lreference).read())
379  else:
380  trees_dict = {}
381 
382  from pprint import PrettyPrinter
383  pp = PrettyPrinter()
384  if trees_dict:
385  result["GaudiTest.TTrees.expected"] = result.Quote(
386  pp.pformat(trees_dict))
387  if ignore:
388  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
389 
390  trees = findTTreeSummaries(stdout)
391  failed = cmpTreesDicts(trees_dict, trees, ignore)
392  if failed:
393  causes.append("trees summaries")
394  msg = "%s: %s != %s" % getCmpFailingValues(
395  trees_dict, trees, failed)
396  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
397  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
398 
399  return causes
400 
401  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
402  dict=None,
403  ignore=None):
404  """
405  Compare the histogram summaries in stdout with the ones in dict or in
406  the reference file (no fields are ignored by default).
407  The presence of histogram summaries when none is expected is not a
408  failure.
409  """
410  if stdout is None:
411  stdout = self.out
412  if result is None:
413  result = self.result
414  if causes is None:
415  causes = self.causes
416 
417  if dict is None:
418  lreference = self._expandReferenceFileName(self.reference)
419  # call the validator if the file exists
420  if lreference and os.path.isfile(lreference):
421  dict = findHistosSummaries(open(lreference).read())
422  else:
423  dict = {}
424 
425  from pprint import PrettyPrinter
426  pp = PrettyPrinter()
427  if dict:
428  result["GaudiTest.Histos.expected"] = result.Quote(
429  pp.pformat(dict))
430  if ignore:
431  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
432 
433  histos = findHistosSummaries(stdout)
434  failed = cmpTreesDicts(dict, histos, ignore)
435  if failed:
436  causes.append("histos summaries")
437  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
438  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
439  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
440 
441  return causes
442 
443  def validateWithReference(self, stdout=None, stderr=None, result=None,
444  causes=None, preproc=None):
445  '''
446  Default validation action: compare standard output and error to the
447  reference files.
448  '''
449 
450  if stdout is None:
451  stdout = self.out
452  if stderr is None:
453  stderr = self.err
454  if result is None:
455  result = self.result
456  if causes is None:
457  causes = self.causes
458 
459  # set the default output preprocessor
460  if preproc is None:
461  preproc = normalizeExamples
462  # check standard output
463  lreference = self._expandReferenceFileName(self.reference)
464  # call the validator if the file exists
465  if lreference and os.path.isfile(lreference):
466  causes += ReferenceFileValidator(lreference,
467  "standard output",
468  "Output Diff",
469  preproc=preproc)(stdout, result)
470  # Compare TTree summaries
471  causes = self.CheckTTreesSummaries(stdout, result, causes)
472  causes = self.CheckHistosSummaries(stdout, result, causes)
473  if causes: # Write a new reference file for stdout
474  try:
475  newref = open(lreference + ".new", "w")
476  # sanitize newlines
477  for l in stdout.splitlines():
478  newref.write(l.rstrip() + '\n')
479  del newref # flush and close
480  except IOError:
481  # Ignore IO errors when trying to update reference files
482  # because we may be in a read-only filesystem
483  pass
484 
485  # check standard error
486  lreference = self._expandReferenceFileName(self.error_reference)
487  # call the validator if we have a file to use
488  if lreference and os.path.isfile(lreference):
489  newcauses = ReferenceFileValidator(lreference,
490  "standard error",
491  "Error Diff",
492  preproc=preproc)(stderr, result)
493  causes += newcauses
494  if newcauses: # Write a new reference file for stderr
495  newref = open(lreference + ".new", "w")
496  # sanitize newlines
497  for l in stderr.splitlines():
498  newref.write(l.rstrip() + '\n')
499  del newref # flush and close
500  else:
501  causes += BasicOutputValidator(lreference, "standard error",
502  "ExecTest.expected_stderr")(stderr, result)
503  return causes
504 
505  def _expandReferenceFileName(self, reffile):
506  # if no file is passed, do nothing
507  if not reffile:
508  return ""
509 
510  # function to split an extension into its constituent parts
511  def platformSplit(p): return set(p.split('-' in p and '-' or '_'))
512 
513  reference = os.path.normpath(os.path.join(self.basedir,
514  os.path.expandvars(reffile)))
515 
516  # old-style platform-specific reference name
517  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
518  if os.path.isfile(spec_ref):
519  reference = spec_ref
520  else: # look for new-style platform specific reference files:
521  # get all the files whose name start with the reference filename
522  dirname, basename = os.path.split(reference)
523  if not dirname:
524  dirname = '.'
525  head = basename + "."
526  head_len = len(head)
527  platform = platformSplit(GetPlatform(self))
528  if 'do0' in platform:
529  platform.add('dbg')
530  candidates = []
531  for f in os.listdir(dirname):
532  if f.startswith(head):
533  req_plat = platformSplit(f[head_len:])
534  if platform.issuperset(req_plat):
535  candidates.append((len(req_plat), f))
536  if candidates: # take the one with highest matching
537  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
538  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
539  candidates.sort()
540  reference = os.path.join(dirname, candidates[-1][1])
541  return reference
542 
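A sketch of the reference-file lookup implemented above; the file name and platform tokens are hypothetical.

t = BaseTest()
t.reference = 'myjob.ref'                   # hypothetical reference file name
print(t._expandReferenceFileName(t.reference))
# prints <basedir>/myjob.ref, unless a platform-specific variant such as
# myjob.ref.x86_64-gcc62 exists whose '-'/'_' separated tokens are all
# contained in the current platform string; the variant matching the most
# tokens wins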
543 # ======= GAUDI TOOLS =======
544 
545 
546 import shutil
547 import string
548 import difflib
549 import calendar
550 
551 try:
552  from GaudiKernel import ROOT6WorkAroundEnabled
553 except ImportError:
554  def ROOT6WorkAroundEnabled(id=None):
555  # dummy implementation
556  return False
557 
558 # --------------------------------- TOOLS ---------------------------------#
559 
560 
561 def RationalizePath(p):
562  """
563  Function used to normalize the given path
564  """
565  newPath = os.path.normpath(os.path.expandvars(p))
566  if os.path.exists(newPath):
567  p = os.path.realpath(newPath)
568  return p
569 
570 
571 def which(executable):
572  """
573  Locates an executable in the executables path ($PATH) and returns the full
574  path to it. An application is looked for with or without the '.exe' suffix.
575  If the executable cannot be found, None is returned
576  """
577  if os.path.isabs(executable):
578  if not os.path.exists(executable):
579  if executable.endswith('.exe'):
580  if os.path.exists(executable[:-4]):
581  return executable[:-4]
582  else:
583  head, executable = os.path.split(executable)
584  else:
585  return executable
586  for d in os.environ.get("PATH").split(os.pathsep):
587  fullpath = os.path.join(d, executable)
588  if os.path.exists(fullpath):
589  return fullpath
590  if executable.endswith('.exe'):
591  return which(executable[:-4])
592  return None
593 
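Illustrative usage (the program name is arbitrary): resolve an executable against $PATH, falling back to the bare name, as BaseTest.run does above.

prog = which('Gaudi.exe') or 'Gaudi.exe'    # full path if found, unchanged otherwise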
594 
595 # -------------------------------------------------------------------------#
596 # ----------------------------- Result Class ------------------------------#
597 # -------------------------------------------------------------------------#
598 import types
599 
600 
601 class Result:
602 
603  PASS = 'PASS'
604  FAIL = 'FAIL'
605  ERROR = 'ERROR'
606  UNTESTED = 'UNTESTED'
607 
608  EXCEPTION = ""
609  RESOURCE = ""
610  TARGET = ""
611  TRACEBACK = ""
612  START_TIME = ""
613  END_TIME = ""
614  TIMEOUT_DETAIL = ""
615 
616  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
617  self.annotations = annotations.copy()
618 
619  def __getitem__(self, key):
620  assert type(key) in types.StringTypes
621  return self.annotations[key]
622 
623  def __setitem__(self, key, value):
624  assert type(key) in types.StringTypes
625  assert type(value) in types.StringTypes
626  self.annotations[key] = value
627 
628  def Quote(self, string):
629  return string
630 
631 
632 # -------------------------------------------------------------------------#
633 # --------------------------- Validator Classes ---------------------------#
634 # -------------------------------------------------------------------------#
635 
636 # Basic implementation of an option validator for Gaudi test. This
637 # implementation is based on the standard (LCG) validation functions used
638 # in QMTest.
639 
640 
641 class BasicOutputValidator:
642 
643  def __init__(self, ref, cause, result_key):
644  self.ref = ref
645  self.cause = cause
646  self.result_key = result_key
647 
648  def __call__(self, out, result):
649  """Compare the captured output stream against the reference text.
650  'out' -- A string containing the data written to the stream being
651  checked (standard output or standard error).
652  'result' -- A 'Result' object. It may be used to annotate
653  the outcome according to the content of the stream.
654  returns -- A list of strings giving causes of failure (empty if
655  'out' matches the reference, ignoring differences in line
656  endings)."""
657 
658  causes = []
659  # Check the output
660  if not self.__CompareText(out, self.ref):
661  causes.append(self.cause)
662  result[self.result_key] = result.Quote(self.ref)
663 
664  return causes
665 
666  def __CompareText(self, s1, s2):
667  """Compare 's1' and 's2', ignoring line endings.
668  's1' -- A string.
669  's2' -- A string.
670  returns -- True if 's1' and 's2' are the same, ignoring
671  differences in line endings."""
672  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
673  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
674  # can fix them
675  to_ignore = re.compile(
676  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
677 
678  def keep_line(l): return not to_ignore.match(l)
679  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
680  else:
681  return s1.splitlines() == s2.splitlines()
682 
683 
684 # ------------------------ Preprocessor elements ------------------------#
685 class FilePreprocessor:
686 
687  """ Base class for a callable that takes a file and returns a modified
688  version of it."""
689 
690  def __processLine__(self, line):
691  return line
692 
693  def __processFile__(self, lines):
694  output = []
695  for l in lines:
696  l = self.__processLine__(l)
697  if l:
698  output.append(l)
699  return output
700 
701  def __call__(self, input):
702  if hasattr(input, "__iter__"):
703  lines = input
704  mergeback = False
705  else:
706  lines = input.splitlines()
707  mergeback = True
708  output = self.__processFile__(lines)
709  if mergeback:
710  output = '\n'.join(output)
711  return output
712 
713  def __add__(self, rhs):
714  return FilePreprocessorSequence([self, rhs])
715 
716 
717 class FilePreprocessorSequence(FilePreprocessor):
718 
719  def __init__(self, members=[]):
720  self.members = members
721 
722  def __add__(self, rhs):
723  return FilePreprocessorSequence(self.members + [rhs])
724 
725  def __call__(self, input):
726  output = input
727  for pp in self.members:
728  output = pp(output)
729  return output
730 
731 
732 class LineSkipper(FilePreprocessor):
733 
734  def __init__(self, strings=[], regexps=[]):
735  import re
736  self.strings = strings
737  self.regexps = map(re.compile, regexps)
738 
739  def __processLine__(self, line):
740  for s in self.strings:
741  if line.find(s) >= 0:
742  return None
743  for r in self.regexps:
744  if r.search(line):
745  return None
746  return line
747 
748 
749 class BlockSkipper(FilePreprocessor):
750 
751  def __init__(self, start, end):
752  self.start = start
753  self.end = end
754  self._skipping = False
755 
756  def __processLine__(self, line):
757  if self.start in line:
758  self._skipping = True
759  return None
760  elif self.end in line:
761  self._skipping = False
762  elif self._skipping:
763  return None
764  return line
765 
766 
767 class RegexpReplacer(FilePreprocessor):
768 
769  def __init__(self, orig, repl="", when=None):
770  if when:
771  when = re.compile(when)
772  self._operations = [(when, re.compile(orig), repl)]
773 
774  def __add__(self, rhs):
775  if isinstance(rhs, RegexpReplacer):
776  res = RegexpReplacer("", "", None)
777  res._operations = self._operations + rhs._operations
778  else:
779  res = FilePreprocessor.__add__(self, rhs)
780  return res
781 
782  def __processLine__(self, line):
783  for w, o, r in self._operations:
784  if w is None or w.search(line):
785  line = o.sub(r, line)
786  return line
787 
788 
789 # Common preprocessors
790 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
791 normalizeDate = RegexpReplacer(
792  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
793  "00:00:00 1970-01-01")
794 normalizeEOL = FilePreprocessor()
795 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
796 
797 skipEmptyLines = FilePreprocessor()
798 # FIXME: that's ugly
799 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
800 
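A hedged sketch of composing extra preprocessors from these building blocks; the time-masking pattern and the input line are invented.

maskTimings = RegexpReplacer(r'\d+\.\d+ s', 'N.NN s')    # hide wall-clock times
myPreprocessor = maskPointers + maskTimings + skipEmptyLines
print(myPreprocessor('loop took 0.42 s at 0xdeadbeef\n\n'))
# prints: loop took N.NN s at 0x########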
801 # Special preprocessor sorting the list of strings (whitespace separated)
802 # that follow a signature on a single line
803 
804 
805 class LineSorter(FilePreprocessor):
806 
807  def __init__(self, signature):
808  self.signature = signature
809  self.siglen = len(signature)
810 
811  def __processLine__(self, line):
812  pos = line.find(self.signature)
813  if pos >= 0:
814  line = line[:(pos + self.siglen)]
815  lst = line[(pos + self.siglen):].split()
816  lst.sort()
817  line += " ".join(lst)
818  return line
819 
820 
821 class SortGroupOfLines(FilePreprocessor):
822 
823  '''
824  Sort group of lines matching a regular expression
825  '''
826 
827  def __init__(self, exp):
828  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
829 
830  def __processFile__(self, lines):
831  match = self.exp.match
832  output = []
833  group = []
834  for l in lines:
835  if match(l):
836  group.append(l)
837  else:
838  if group:
839  group.sort()
840  output.extend(group)
841  group = []
842  output.append(l)
843  return output
844 
845 
846 # Preprocessors for GaudiExamples
847 normalizeExamples = maskPointers + normalizeDate
848 for w, o, r in [
849  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
850  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
851  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
852  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
853  ("^JobOptionsSvc.*options successfully read in from",
854  r"read in from .*[/\\]([^/\\]*)$", r"file \1"), # normalize path to options
855  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
856  (None, r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
857  "00000000-0000-0000-0000-000000000000"),
858  # Absorb a change in ServiceLocatorHelper
859  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
860  "ServiceLocatorHelper::service"),
861  # Remove the leading 0 in Windows' exponential format
862  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
863  # Output line changed in Gaudi v24
864  (None, r'Service reference count check:',
865  r'Looping over all active services...'),
866  # Ignore count of declared properties (anyway they are all printed)
867  (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
868  r"\1NN"),
869 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
870  normalizeExamples += RegexpReplacer(o, r, w)
871 
872 lineSkipper = LineSkipper(["//GP:",
873  "JobOptionsSvc INFO # ",
874  "JobOptionsSvc WARNING # ",
875  "Time User",
876  "Welcome to",
877  "This machine has a speed",
878  "TIME:",
879  "running on",
880  "ToolSvc.Sequenc... INFO",
881  "DataListenerSvc INFO XML written to file:",
882  "[INFO]", "[WARNING]",
883  "DEBUG No writable file catalog found which contains FID:",
884  "DEBUG Service base class initialized successfully",
885  # changed between v20 and v21
886  "DEBUG Incident timing:",
887  # introduced with patch #3487
888  # changed the level of the message from INFO to
889  # DEBUG
890  "INFO 'CnvServices':[",
891  # message removed because could be printed in constructor
892  "DEBUG 'CnvServices':[",
893  # The signal handler complains about SIGXCPU not
894  # defined on some platforms
895  'SIGXCPU',
896  ], regexps=[
897  r"^JobOptionsSvc INFO *$",
898  r"^# ", # Ignore python comments
899  # skip the message reporting the version of the root file
900  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
901  r"File '.*.xml' does not exist",
902  r"INFO Refer to dataset .* by its file ID:",
903  r"INFO Referring to dataset .* by its file ID:",
904  r"INFO Disconnect from dataset",
905  r"INFO Disconnected from dataset",
906  r"INFO Disconnected data IO:",
907  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
908  # I want to ignore the header of the unchecked StatusCode report
909  r"^StatusCodeSvc.*listing all unchecked return codes:",
910  r"^StatusCodeSvc\s*INFO\s*$",
911  r"Num\s*\|\s*Function\s*\|\s*Source Library",
912  r"^[-+]*\s*$",
913  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
914  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
915  # Hide unchecked StatusCodes from dictionaries
916  r"^ +[0-9]+ \|.*ROOT",
917  r"^ +[0-9]+ \|.*\|.*Dict",
918  # Hide success StatusCodeSvc message
919  r"StatusCodeSvc.*all StatusCode instances where checked",
920  # Hide EventLoopMgr total timing report
921  r"EventLoopMgr.*---> Loop Finished",
922  r"HiveSlimEventLo.*---> Loop Finished",
923  # Remove ROOT TTree summary table, which changes from one version to the
924  # other
925  r"^\*.*\*$",
926  # Remove Histos Summaries
927  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
928  r"^ \|",
929  r"^ ID=",
930  # Ignore added/removed properties
931  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
932  # these were missing in tools
933  r"Property(.*)'AuditRe(start|initialize)':",
934  r"Property(.*)'IsIOBound':",
935  # removed with gaudi/Gaudi!273
936  r"Property(.*)'ErrorCount(er)?':",
937  # added with gaudi/Gaudi!306
938  r"Property(.*)'Sequential':",
939  # added with gaudi/Gaudi!314
940  r"Property(.*)'FilterCircularDependencies':",
941  # removed with gaudi/Gaudi!316
942  r"Property(.*)'IsClonable':",
943  # ignore uninteresting/obsolete messages
944  r"Property update for OutputLevel : new value =",
945  r"EventLoopMgr\s*DEBUG Creating OutputStream",
946 ])
947 
948 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
949  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
950  # fix them
951  lineSkipper += LineSkipper(regexps=[
952  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
953  ])
954 
955 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
956  normalizeEOL + LineSorter("Services to release : ") +
957  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
958 
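Illustrative application of the combined chain to a made-up output line; the exact result depends on the full set of filters defined above.

raw = 'HistoSvc       INFO histograms saved at 0x7f3a9c001230\n\n'
print(normalizeExamples(raw))               # pointer masked, empty line dropped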
959 # --------------------- Validation functions/classes ---------------------#
960 
961 
962 class ReferenceFileValidator:
963 
964  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
965  self.reffile = os.path.expandvars(reffile)
966  self.cause = cause
967  self.result_key = result_key
968  self.preproc = preproc
969 
970  def __call__(self, stdout, result):
971  causes = []
972  if os.path.isfile(self.reffile):
973  orig = open(self.reffile).xreadlines()
974  if self.preproc:
975  orig = self.preproc(orig)
976  result[self.result_key + '.preproc.orig'] = \
977  result.Quote('\n'.join(map(str.strip, orig)))
978  else:
979  orig = []
980  new = stdout.splitlines()
981  if self.preproc:
982  new = self.preproc(new)
983 
984  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
985  filterdiffs = map(lambda x: x.strip(), filter(
986  lambda x: x[0] != " ", diffs))
987  if filterdiffs:
988  result[self.result_key] = result.Quote("\n".join(filterdiffs))
989  result[self.result_key] += result.Quote("""
990  Legend:
991  -) reference file
992  +) standard output of the test""")
993  result[self.result_key + '.preproc.new'] = \
994  result.Quote('\n'.join(map(str.strip, new)))
995  causes.append(self.cause)
996  return causes
997 
998 
999 def findTTreeSummaries(stdout):
1000  """
1001  Scan stdout to find ROOT TTree summaries and digest them.
1002  """
1003  stars = re.compile(r"^\*+$")
1004  outlines = stdout.splitlines()
1005  nlines = len(outlines)
1006  trees = {}
1007 
1008  i = 0
1009  while i < nlines: # loop over the output
1010  # look for
1011  while i < nlines and not stars.match(outlines[i]):
1012  i += 1
1013  if i < nlines:
1014  tree, i = _parseTTreeSummary(outlines, i)
1015  if tree:
1016  trees[tree["Name"]] = tree
1017 
1018  return trees
1019 
1020 
1021 def cmpTreesDicts(reference, to_check, ignore=None):
1022  """
1023  Check that all the keys in reference are in to_check too, with the same value.
1024  If the value is a dict, the function is called recursively. to_check can
1025  contain more keys than reference, that will not be tested.
1026  The function returns at the first difference found.
1027  """
1028  fail_keys = []
1029  # filter the keys in the reference dictionary
1030  if ignore:
1031  ignore_re = re.compile(ignore)
1032  keys = [key for key in reference if not ignore_re.match(key)]
1033  else:
1034  keys = reference.keys()
1035  # loop over the keys (not ignored) in the reference dictionary
1036  for k in keys:
1037  if k in to_check: # the key must be in the dictionary to_check
1038  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1039  # if both reference and to_check values are dictionaries,
1040  # recurse
1041  failed = fail_keys = cmpTreesDicts(
1042  reference[k], to_check[k], ignore)
1043  else:
1044  # compare the two values
1045  failed = to_check[k] != reference[k]
1046  else: # handle missing keys in the dictionary to check (i.e. failure)
1047  to_check[k] = None
1048  failed = True
1049  if failed:
1050  fail_keys.insert(0, k)
1051  break # exit from the loop at the first failure
1052  return fail_keys # return the list of keys bringing to the different values
1053 
1054 
1055 def getCmpFailingValues(reference, to_check, fail_path):
1056  c = to_check
1057  r = reference
1058  for k in fail_path:
1059  c = c.get(k, None)
1060  r = r.get(k, None)
1061  if c is None or r is None:
1062  break # one of the dictionaries is not deep enough
1063  return (fail_path, r, c)
1064 
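Minimal illustration, with invented dictionaries, of how a mismatch is located and then reported by the two helpers above.

ref = {'MyTree': {'Entries': 100, 'Branches': {'x': {'Entries': 100}}}}
got = {'MyTree': {'Entries': 99, 'Branches': {'x': {'Entries': 100}}}}
failed = cmpTreesDicts(ref, got)            # -> ['MyTree', 'Entries']
if failed:
    print("%s: %s != %s" % getCmpFailingValues(ref, got, failed))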
1065 
1066 # signature of the print-out of the histograms
1067 h_count_re = re.compile(
1068  r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1069 
1070 
1071 def _parseTTreeSummary(lines, pos):
1072  """
1073  Parse the TTree summary table in lines, starting from pos.
1074  Returns a tuple with the dictionary of the digested information and the
1075  position of the first line after the summary.
1076  """
1077  result = {}
1078  i = pos + 1 # first line is a sequence of '*'
1079  count = len(lines)
1080 
1081  def splitcols(l): return [f.strip() for f in l.strip("*\n").split(':', 2)]
1082 
1083  def parseblock(ll):
1084  r = {}
1085  cols = splitcols(ll[0])
1086  r["Name"], r["Title"] = cols[1:]
1087 
1088  cols = splitcols(ll[1])
1089  r["Entries"] = int(cols[1])
1090 
1091  sizes = cols[2].split()
1092  r["Total size"] = int(sizes[2])
1093  if sizes[-1] == "memory":
1094  r["File size"] = 0
1095  else:
1096  r["File size"] = int(sizes[-1])
1097 
1098  cols = splitcols(ll[2])
1099  sizes = cols[2].split()
1100  if cols[0] == "Baskets":
1101  r["Baskets"] = int(cols[1])
1102  r["Basket size"] = int(sizes[2])
1103  r["Compression"] = float(sizes[-1])
1104  return r
1105 
1106  if i < (count - 3) and lines[i].startswith("*Tree"):
1107  result = parseblock(lines[i:i + 3])
1108  result["Branches"] = {}
1109  i += 4
1110  while i < (count - 3) and lines[i].startswith("*Br"):
1111  if i < (count - 2) and lines[i].startswith("*Branch "):
1112  # skip branch header
1113  i += 3
1114  continue
1115  branch = parseblock(lines[i:i + 3])
1116  result["Branches"][branch["Name"]] = branch
1117  i += 4
1118 
1119  return (result, i)
1120 
1121 
1122 def parseHistosSummary(lines, pos):
1123  """
1124  Extract the histograms infos from the lines starting at pos.
1125  Returns the position of the first line after the summary block.
1126  """
1127  global h_count_re
1128  h_table_head = re.compile(
1129  r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1130  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1131 
1132  nlines = len(lines)
1133 
1134  # decode header
1135  m = h_count_re.search(lines[pos])
1136  name = m.group(1).strip()
1137  total = int(m.group(2))
1138  header = {}
1139  for k, v in [x.split("=") for x in m.group(3).split()]:
1140  header[k] = int(v)
1141  pos += 1
1142  header["Total"] = total
1143 
1144  summ = {}
1145  while pos < nlines:
1146  m = h_table_head.search(lines[pos])
1147  if m:
1148  t, d = m.groups(1) # type and directory
1149  t = t.replace(" profile", "Prof")
1150  pos += 1
1151  if pos < nlines:
1152  l = lines[pos]
1153  else:
1154  l = ""
1155  cont = {}
1156  if l.startswith(" | ID"):
1157  # table format
1158  titles = [x.strip() for x in l.split("|")][1:]
1159  pos += 1
1160  while pos < nlines and lines[pos].startswith(" |"):
1161  l = lines[pos]
1162  values = [x.strip() for x in l.split("|")][1:]
1163  hcont = {}
1164  for i in range(len(titles)):
1165  hcont[titles[i]] = values[i]
1166  cont[hcont["ID"]] = hcont
1167  pos += 1
1168  elif l.startswith(" ID="):
1169  while pos < nlines and lines[pos].startswith(" ID="):
1170  values = [x.strip()
1171  for x in h_short_summ.search(lines[pos]).groups()]
1172  cont[values[0]] = values
1173  pos += 1
1174  else: # not interpreted
1175  raise RuntimeError(
1176  "Cannot understand line %d: '%s'" % (pos, l))
1177  if not d in summ:
1178  summ[d] = {}
1179  summ[d][t] = cont
1180  summ[d]["header"] = header
1181  else:
1182  break
1183  if not summ:
1184  # If the full table is not present, we use only the header
1185  summ[name] = {"header": header}
1186  return summ, pos
1187 
1188 
1189 def findHistosSummaries(stdout):
1190  """
1191  Scan stdout to find histogram summaries and digest them.
1192  """
1193  outlines = stdout.splitlines()
1194  nlines = len(outlines) - 1
1195  summaries = {}
1196  global h_count_re
1197 
1198  pos = 0
1199  while pos < nlines:
1200  summ = {}
1201  # find first line of block:
1202  match = h_count_re.search(outlines[pos])
1203  while pos < nlines and not match:
1204  pos += 1
1205  match = h_count_re.search(outlines[pos])
1206  if match:
1207  summ, pos = parseHistosSummary(outlines, pos)
1208  summaries.update(summ)
1209  return summaries
1210 
1211 
1212 def PlatformIsNotSupported(self, context, result):
1213  platform = GetPlatform(self)
1214  unsupported = [re.compile(x) for x in [str(y).strip()
1215  for y in unsupported_platforms] if x]
1216  for p_re in unsupported:
1217  if p_re.search(platform):
1218  result.SetOutcome(result.UNTESTED)
1219  result[result.CAUSE] = 'Platform not supported.'
1220  return True
1221  return False
1222 
1223 
1224 def GetPlatform(self):
1225  """
1226  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1227  """
1228  arch = "None"
1229  # check architecture name
1230  if "BINARY_TAG" in os.environ:
1231  arch = os.environ["BINARY_TAG"]
1232  elif "CMTCONFIG" in os.environ:
1233  arch = os.environ["CMTCONFIG"]
1234  elif "SCRAM_ARCH" in os.environ:
1235  arch = os.environ["SCRAM_ARCH"]
1236  return arch
1237 
1238 
1239 def isWinPlatform(self):
1240  """
1241  Return True if the current platform is Windows.
1242 
1243  This function was needed because of the change in the CMTCONFIG format,
1244  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1245  """
1246  platform = GetPlatform(self)
1247  return "winxp" in platform or platform.startswith("win")
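Illustrative use of the two platform helpers (the printed text is made up); note that the 'self' argument is not actually used by either function.

if isWinPlatform(None):
    print('Windows platform: ' + GetPlatform(None))
else:
    print('platform id: ' + GetPlatform(None))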
Definition: BaseTest.py:628