The Gaudi Framework  v30r3 (a5ef0a68)
BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 
17 def sanitize_for_xml(data):
18  '''
19  Take a string and quote any character that is not valid in XML text, so
20  that the result can safely be embedded in an XML document.
21 
22  >>> sanitize_for_xml('this is \x1b')
23  'this is [NON-XML-CHAR-0x1B]'
24  '''
25  bad_chars = re.compile(
26  u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
27 
28  def quote(match):
29  'helper function'
30  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
31  return bad_chars.sub(quote, data)
32 
33 
34 def dumpProcs(name):
35  '''helper to debug GAUDI-1084, dump the list of processes'''
36  from getpass import getuser
37  if 'WORKSPACE' in os.environ:
38  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
39  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
40  f.write(p.communicate()[0])
41 
42 
43 def kill_tree(ppid, sig):
44  '''
45  Send a signal to a process and all its child processes (starting from the
46  leaves).
47  '''
48  log = logging.getLogger('kill_tree')
49  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
50  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
51  children = map(int, get_children.communicate()[0].split())
52  for child in children:
53  kill_tree(child, sig)
54  try:
55  log.debug('killing process %d', ppid)
56  os.kill(ppid, sig)
57  except OSError, err:
58  if err.errno != 3: # No such process
59  raise
60  log.debug('no such process %d', ppid)
61 
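# Editor's note (illustrative sketch, not part of the original file): kill_tree
# is used below in BaseTest.run() to tear down a timed-out test, first politely
# and then forcefully; the pid value is hypothetical.
#
#   >>> import signal
#   >>> kill_tree(12345, signal.SIGTERM)   # ask the whole process tree to terminate
#   >>> kill_tree(12345, signal.SIGKILL)   # escalate if it is still alive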
62 # -------------------------------------------------------------------------#
63 
64 
65 class BaseTest(object):
66 
67  _common_tmpdir = None
68 
69  def __init__(self):
70  self.program = ''
71  self.args = []
72  self.reference = ''
73  self.error_reference = ''
74  self.options = ''
75  self.stderr = ''
76  self.timeout = 600
77  self.exit_code = None
78  self.environment = None
79  self.unsupported_platforms = []
80  self.signal = None
81  self.workdir = os.curdir
82  self.use_temp_dir = False
83  # Variables not for users
84  self.status = None
85  self.name = ''
86  self.causes = []
87  self.result = Result(self)
88  self.returnedCode = 0
89  self.out = ''
90  self.err = ''
91  self.proc = None
92  self.stack_trace = None
93  self.basedir = os.getcwd()
94 
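 # Editor's note (illustrative sketch, not part of the original file): a test
 # typically fills a few of the attributes set above and then calls run(); the
 # program and option file names here are hypothetical.
 #
 #   test = BaseTest()
 #   test.program = 'gaudirun.py'
 #   test.args = ['options/jobOptions.py']
 #   test.timeout = 120
 #   summary = test.run()   # returns a dict of the fields listed at the end of run()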
95  def run(self):
96  logging.debug('running test %s', self.name)
97 
98  if self.options:
99  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
100  'from\s+Configurables\s+import', self.options):
101  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
102  else:
103  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
104  optionFile.file.write(self.options)
105  optionFile.seek(0)
106  self.args.append(RationalizePath(optionFile.name))
107 
108  # If no environment was specified, use the current one; otherwise merge them
109  if self.environment is None:
110  self.environment = os.environ
111  else:
112  self.environment = dict(
113  self.environment.items() + os.environ.items())
114 
115  platform_id = (os.environ.get('BINARY_TAG') or
116  os.environ.get('CMTCONFIG') or
117  platform.platform())
118  # If at least one regex matches we skip the test.
119  skip_test = bool([None
120  for prex in self.unsupported_platforms
121  if re.search(prex, platform_id)])
122 
123  if not skip_test:
124  # handle working/temporary directory options
125  workdir = self.workdir
126  if self.use_temp_dir:
127  if self._common_tmpdir:
128  workdir = self._common_tmpdir
129  else:
130  workdir = tempfile.mkdtemp()
131 
132  # prepare the command to execute
133  prog = ''
134  if self.program != '':
135  prog = self.program
136  elif "GAUDIEXE" in os.environ:
137  prog = os.environ["GAUDIEXE"]
138  else:
139  prog = "Gaudi.exe"
140 
141  dummy, prog_ext = os.path.splitext(prog)
142  if prog_ext not in [".exe", ".py", ".bat"]:
143  prog += ".exe"
144  prog_ext = ".exe"
145 
146  prog = which(prog) or prog
147 
148  args = map(RationalizePath, self.args)
149 
150  if prog_ext == ".py":
151  params = ['python', RationalizePath(prog)] + args
152  else:
153  params = [RationalizePath(prog)] + args
154 
155  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
156  'RESOURCE': None, 'TARGET': None,
157  'TRACEBACK': None, 'START_TIME': None,
158  'END_TIME': None, 'TIMEOUT_DETAIL': None})
159  self.result = validatorRes
160 
161  # we need to switch directory because the validator expects to run
162  # in the same dir as the program
163  os.chdir(workdir)
164 
165  # launching test in a different thread to handle timeout exception
166  def target():
167  logging.debug('executing %r in %s',
168  params, workdir)
169  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
170  env=self.environment)
171  logging.debug('(pid: %d)', self.proc.pid)
172  self.out, self.err = self.proc.communicate()
173 
174  thread = threading.Thread(target=target)
175  thread.start()
176  # catching timeout
177  thread.join(self.timeout)
178 
179  if thread.is_alive():
180  logging.debug('time out in test %s (pid %d)',
181  self.name, self.proc.pid)
182  # get the stack trace of the stuck process
183  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
184  '--eval-command=thread apply all backtrace']
185  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
186  self.stack_trace = gdb.communicate()[0]
187 
188  kill_tree(self.proc.pid, signal.SIGTERM)
189  thread.join(60)
190  if thread.is_alive():
191  kill_tree(self.proc.pid, signal.SIGKILL)
192  self.causes.append('timeout')
193  else:
194  logging.debug('completed test %s', self.name)
195 
196  # Getting the error code
197  logging.debug('returnedCode = %s', self.proc.returncode)
198  self.returnedCode = self.proc.returncode
199 
200  logging.debug('validating test...')
201  self.result, self.causes = self.ValidateOutput(stdout=self.out,
202  stderr=self.err,
203  result=validatorRes)
204 
205  # remove the temporary directory if we created it
206  if self.use_temp_dir and not self._common_tmpdir:
207  shutil.rmtree(workdir, True)
208 
209  os.chdir(self.basedir)
210 
211  # handle application exit code
212  if self.signal is not None:
213  if int(self.returnedCode) != -int(self.signal):
214  self.causes.append('exit code')
215 
216  elif self.exit_code is not None:
217  if int(self.returnedCode) != int(self.exit_code):
218  self.causes.append('exit code')
219 
220  elif self.returnedCode != 0:
221  self.causes.append("exit code")
222 
223  if self.causes:
224  self.status = "failed"
225  else:
226  self.status = "passed"
227 
228  else:
229  self.status = "skipped"
230 
231  logging.debug('%s: %s', self.name, self.status)
232  field_mapping = {'Exit Code': 'returnedCode',
233  'stderr': 'err',
234  'Arguments': 'args',
235  'Environment': 'environment',
236  'Status': 'status',
237  'stdout': 'out',
238  'Program Name': 'program',
239  'Name': 'name',
240  'Validator': 'validator',
241  'Output Reference File': 'reference',
242  'Error Reference File': 'error_reference',
243  'Causes': 'causes',
244  # 'Validator Result': 'result.annotations',
245  'Unsupported Platforms': 'unsupported_platforms',
246  'Stack Trace': 'stack_trace'}
247  resultDict = [(key, getattr(self, attr))
248  for key, attr in field_mapping.iteritems()
249  if getattr(self, attr)]
250  resultDict.append(('Working Directory',
251  RationalizePath(os.path.join(os.getcwd(),
252  self.workdir))))
253  # print dict(resultDict).keys()
254  resultDict.extend(self.result.annotations.iteritems())
255  # print self.result.annotations.keys()
256  return dict(resultDict)
257 
258  # -------------------------------------------------#
259  # ----------------Validating tool------------------#
260  # -------------------------------------------------#
261 
262  def ValidateOutput(self, stdout, stderr, result):
263  if not self.stderr:
264  self.validateWithReference(stdout, stderr, result, self.causes)
265  elif stderr.strip() != self.stderr.strip():
266  self.causes.append('standard error')
267  return result, self.causes
268 
269  def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None):
270  """
271  Given a block of text, try to find it in the output. The block has to be identified by a signature line. By default the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed explicitly via the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
272  """
273 
274  if reference is None:
275  reference = self.reference
276  if stdout is None:
277  stdout = self.out
278  if result is None:
279  result = self.result
280  if causes is None:
281  causes = self.causes
282 
283  reflines = filter(
284  None, map(lambda s: s.rstrip(), reference.splitlines()))
285  if not reflines:
286  raise RuntimeError("Empty (or null) reference")
287  # the same on standard output
288  outlines = filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))
289 
290  res_field = "GaudiTest.RefBlock"
291  if id:
292  res_field += "_%s" % id
293 
294  if signature is None:
295  if signature_offset < 0:
296  signature_offset = len(reflines) + signature_offset  # negative offsets count from the end of the block
297  signature = reflines[signature_offset]
298  # find the reference block in the output file
299  try:
300  pos = outlines.index(signature)
301  outlines = outlines[pos - signature_offset:pos +
302  len(reflines) - signature_offset]
303  if reflines != outlines:
304  msg = "standard output"
305  # I do not want 2 messages in causes if the function is called
306  # twice
307  if not msg in causes:
308  causes.append(msg)
309  result[res_field +
310  ".observed"] = result.Quote("\n".join(outlines))
311  except ValueError:
312  causes.append("missing signature")
313  result[res_field + ".signature"] = result.Quote(signature)
314  if len(reflines) > 1 or signature != reflines[0]:
315  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
316  return causes
317 
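 # Editor's note (illustrative sketch, not part of the original file): a
 # validator would typically call findReferenceBlock with a block expected to
 # appear verbatim in the captured stdout; the message text is hypothetical.
 #
 #   self.findReferenceBlock('''
 #   ApplicationMgr       INFO Application Manager Initialized successfully
 #   ''', stdout=self.out)
 #
 # A mismatch appends "standard output" to the causes and records the observed
 # block in the result annotations.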
318  def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None, result=None, causes=None):
319  """
320  Count the number of messages with required severity (by default ERROR and FATAL)
321  and check if their numbers match the expected ones (0 by default).
322  The dictionary "expected" can be used to tune the number of errors and fatals
323  allowed, or to limit the number of expected warnings etc.
324  """
325 
326  if stdout is None:
327  stdout = self.out
328  if result is None:
329  result = self.result
330  if causes is None:
331  causes = self.causes
332 
333  # prepare the dictionary to record the extracted lines
334  errors = {}
335  for sev in expected:
336  errors[sev] = []
337 
338  outlines = stdout.splitlines()
339  from math import log10
340  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
341 
342  linecount = 0
343  for l in outlines:
344  linecount += 1
345  words = l.split()
346  if len(words) >= 2 and words[1] in errors:
347  errors[words[1]].append(fmt % (linecount, l.rstrip()))
348 
349  for e in errors:
350  if len(errors[e]) != expected[e]:
351  causes.append('%s(%d)' % (e, len(errors[e])))
352  result["GaudiTest.lines.%s" %
353  e] = result.Quote('\n'.join(errors[e]))
354  result["GaudiTest.lines.%s.expected#" %
355  e] = result.Quote(str(expected[e]))
356 
357  return causes
358 
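 # Editor's note (illustrative sketch, not part of the original file): allow
 # exactly two ERROR lines and no FATAL lines in the captured output.
 #
 #   self.countErrorLines(expected={'ERROR': 2, 'FATAL': 0})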
359  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
360  trees_dict=None,
361  ignore=r"Basket|.*size|Compression"):
362  """
363  Compare the TTree summaries in stdout with the ones in trees_dict or in
364  the reference file. By default ignore the size, compression and basket
365  fields.
366  The presence of TTree summaries when none is expected is not a failure.
367  """
368  if stdout is None:
369  stdout = self.out
370  if result is None:
371  result = self.result
372  if causes is None:
373  causes = self.causes
374  if trees_dict is None:
375  lreference = self._expandReferenceFileName(self.reference)
376  # call the validator if the file exists
377  if lreference and os.path.isfile(lreference):
378  trees_dict = findTTreeSummaries(open(lreference).read())
379  else:
380  trees_dict = {}
381 
382  from pprint import PrettyPrinter
383  pp = PrettyPrinter()
384  if trees_dict:
385  result["GaudiTest.TTrees.expected"] = result.Quote(
386  pp.pformat(trees_dict))
387  if ignore:
388  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
389 
390  trees = findTTreeSummaries(stdout)
391  failed = cmpTreesDicts(trees_dict, trees, ignore)
392  if failed:
393  causes.append("trees summaries")
394  msg = "%s: %s != %s" % getCmpFailingValues(
395  trees_dict, trees, failed)
396  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
397  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
398 
399  return causes
400 
401  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
402  dict=None,
403  ignore=None):
404  """
405  Compare the histogram summaries in stdout with the ones in dict or in
406  the reference file. By default nothing is ignored.
407  The presence of histogram summaries when none is expected is not a
408  failure.
409  """
410  if stdout is None:
411  stdout = self.out
412  if result is None:
413  result = self.result
414  if causes is None:
415  causes = self.causes
416 
417  if dict is None:
418  lreference = self._expandReferenceFileName(self.reference)
419  # call the validator if the file exists
420  if lreference and os.path.isfile(lreference):
421  dict = findHistosSummaries(open(lreference).read())
422  else:
423  dict = {}
424 
425  from pprint import PrettyPrinter
426  pp = PrettyPrinter()
427  if dict:
428  result["GaudiTest.Histos.expected"] = result.Quote(
429  pp.pformat(dict))
430  if ignore:
431  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
432 
433  histos = findHistosSummaries(stdout)
434  failed = cmpTreesDicts(dict, histos, ignore)
435  if failed:
436  causes.append("histos summaries")
437  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
438  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
439  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
440 
441  return causes
442 
443  def validateWithReference(self, stdout=None, stderr=None, result=None,
444  causes=None, preproc=None):
445  '''
446  Default validation action: compare standard output and error to the
447  reference files.
448  '''
449 
450  if stdout is None:
451  stdout = self.out
452  if stderr is None:
453  stderr = self.err
454  if result is None:
455  result = self.result
456  if causes is None:
457  causes = self.causes
458 
459  # set the default output preprocessor
460  if preproc is None:
461  preproc = normalizeExamples
462  # check standard output
463  lreference = self._expandReferenceFileName(self.reference)
464  # call the validator if the file exists
465  if lreference and os.path.isfile(lreference):
466  causes += ReferenceFileValidator(lreference,
467  "standard output",
468  "Output Diff",
469  preproc=preproc)(stdout, result)
470  # Compare TTree summaries
471  causes = self.CheckTTreesSummaries(stdout, result, causes)
472  causes = self.CheckHistosSummaries(stdout, result, causes)
473  if causes: # Write a new reference file for stdout
474  try:
475  newref = open(lreference + ".new", "w")
476  # sanitize newlines
477  for l in stdout.splitlines():
478  newref.write(l.rstrip() + '\n')
479  del newref # flush and close
480  except IOError:
481  # Ignore IO errors when trying to update reference files
482  # because we may be in a read-only filesystem
483  pass
484 
485  # check standard error
486  lreference = self._expandReferenceFileName(self.error_reference)
487  # call the validator if we have a file to use
488  if lreference and os.path.isfile(lreference):
489  newcauses = ReferenceFileValidator(lreference,
490  "standard error",
491  "Error Diff",
492  preproc=preproc)(stderr, result)
493  causes += newcauses
494  if newcauses: # Write a new reference file for stderr
495  newref = open(lreference + ".new", "w")
496  # sanitize newlines
497  for l in stderr.splitlines():
498  newref.write(l.rstrip() + '\n')
499  del newref # flush and close
500  else:
501  causes += BasicOutputValidator(lreference, "standard error",
502  "ExecTest.expected_stderr")(stderr, result)
503  return causes
504 
505  def _expandReferenceFileName(self, reffile):
506  # if no file is passed, do nothing
507  if not reffile:
508  return ""
509 
510  # function to split an extension into its constituent parts
511  def platformSplit(p): return set(p.split('-' in p and '-' or '_'))
512 
513  reference = os.path.normpath(os.path.join(self.basedir,
514  os.path.expandvars(reffile)))
515 
516  # old-style platform-specific reference name
517  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
518  if os.path.isfile(spec_ref):
519  reference = spec_ref
520  else: # look for new-style platform specific reference files:
521  # get all the files whose name start with the reference filename
522  dirname, basename = os.path.split(reference)
523  if not dirname:
524  dirname = '.'
525  head = basename + "."
526  head_len = len(head)
527  platform = platformSplit(GetPlatform(self))
528  if 'do0' in platform:
529  platform.add('dbg')
530  candidates = []
531  for f in os.listdir(dirname):
532  if f.startswith(head):
533  req_plat = platformSplit(f[head_len:])
534  if platform.issuperset(req_plat):
535  candidates.append((len(req_plat), f))
536  if candidates: # take the one with highest matching
537  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
538  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
539  candidates.sort()
540  reference = os.path.join(dirname, candidates[-1][1])
541  return reference
542 
543 # ======= GAUDI TOOLS =======
544 
545 
546 import shutil
547 import string
548 import difflib
549 import calendar
550 
551 try:
552  from GaudiKernel import ROOT6WorkAroundEnabled
553 except ImportError:
554  def ROOT6WorkAroundEnabled(id=None):
555  # dummy implementation
556  return False
557 
558 # --------------------------------- TOOLS ---------------------------------#
559 
560
561 def RationalizePath(p):
562  """
563  Normalize the given path by expanding environment variables; if the resulting path exists, return its canonical (realpath) form.
564  """
565  newPath = os.path.normpath(os.path.expandvars(p))
566  if os.path.exists(newPath):
567  p = os.path.realpath(newPath)
568  return p
569 
570 
571 def which(executable):
572  """
573  Locate an executable in the executable search path ($PATH) and return the
574  full path to it. The application is looked for with and without the '.exe'
575  suffix. If the executable cannot be found, None is returned.
576  """
577  if os.path.isabs(executable):
578  if not os.path.exists(executable):
579  if executable.endswith('.exe'):
580  if os.path.exists(executable[:-4]):
581  return executable[:-4]
582  else:
583  head, executable = os.path.split(executable)
584  else:
585  return executable
586  for d in os.environ.get("PATH").split(os.pathsep):
587  fullpath = os.path.join(d, executable)
588  if os.path.exists(fullpath):
589  return fullpath
590  if executable.endswith('.exe'):
591  return which(executable[:-4])
592  return None
593 
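# Editor's note (illustrative sketch, not part of the original file; the
# returned path depends on the system):
#
#   >>> which('ls')
#   '/bin/ls'
#   >>> which('Gaudi.exe')   # also retried without the '.exe' suffix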
594 
595 # -------------------------------------------------------------------------#
596 # ----------------------------- Result Class ------------------------------#
597 # -------------------------------------------------------------------------#
598 import types
599 
600 
601 class Result:
602 
603  PASS = 'PASS'
604  FAIL = 'FAIL'
605  ERROR = 'ERROR'
606  UNTESTED = 'UNTESTED'
607 
608  EXCEPTION = ""
609  RESOURCE = ""
610  TARGET = ""
611  TRACEBACK = ""
612  START_TIME = ""
613  END_TIME = ""
614  TIMEOUT_DETAIL = ""
615 
616  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
617  self.annotations = annotations.copy()
618 
619  def __getitem__(self, key):
620  assert type(key) in types.StringTypes
621  return self.annotations[key]
622 
623  def __setitem__(self, key, value):
624  assert type(key) in types.StringTypes
625  assert type(value) in types.StringTypes
626  self.annotations[key] = value
627 
628  def Quote(self, string):
629  return string
630 
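# Editor's note (illustrative sketch, not part of the original file): Result
# behaves as a small string-to-string annotation store used by the validators;
# the key below is hypothetical.
#
#   >>> r = Result()
#   >>> r['GaudiTest.comment'] = r.Quote('everything fine')
#   >>> r['GaudiTest.comment']
#   'everything fine'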
631 
632 # -------------------------------------------------------------------------#
633 # --------------------------- Validator Classes ---------------------------#
634 # -------------------------------------------------------------------------#
635 
636 # Basic implementation of an output validator for Gaudi tests. This
637 # implementation is based on the standard (LCG) validation functions used
638 # in QMTest.
639 
640 
641 class BasicOutputValidator:
642
643  def __init__(self, ref, cause, result_key):
644  self.ref = ref
645  self.cause = cause
646  self.result_key = result_key
647 
648  def __call__(self, out, result):
649  """Validate the output of the program.
650  'out' -- A string containing the data written to the output stream
651  being validated (standard output or standard error, depending on
652  how the validator was configured).
653  'result' -- A 'Result' object. It may be used to annotate
654  the outcome according to the content of the validated
655  output.
656  returns -- A list of strings giving causes of failure."""
657 
658  causes = []
659  # Check the output
660  if not self.__CompareText(out, self.ref):
661  causes.append(self.cause)
662  result[self.result_key] = result.Quote(self.ref)
663 
664  return causes
665 
666  def __CompareText(self, s1, s2):
667  """Compare 's1' and 's2', ignoring line endings.
668  's1' -- A string.
669  's2' -- A string.
670  returns -- True if 's1' and 's2' are the same, ignoring
671  differences in line endings."""
672  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
673  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
674  # can fix them
675  to_ignore = re.compile(
676  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
677 
678  def keep_line(l): return not to_ignore.match(l)
679  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
680  else:
681  return s1.splitlines() == s2.splitlines()
682 
683 
684 # ------------------------ Preprocessor elements ------------------------#
685 class FilePreprocessor:
686
687  """ Base class for a callable that takes a file and returns a modified
688  version of it."""
689 
690  def __processLine__(self, line):
691  return line
692 
693  def __processFile__(self, lines):
694  output = []
695  for l in lines:
696  l = self.__processLine__(l)
697  if l:
698  output.append(l)
699  return output
700 
701  def __call__(self, input):
702  if hasattr(input, "__iter__"):
703  lines = input
704  mergeback = False
705  else:
706  lines = input.splitlines()
707  mergeback = True
708  output = self.__processFile__(lines)
709  if mergeback:
710  output = '\n'.join(output)
711  return output
712 
713  def __add__(self, rhs):
714  return FilePreprocessorSequence([self, rhs])
715 
716 
717 class FilePreprocessorSequence(FilePreprocessor):
718
719  def __init__(self, members=[]):
720  self.members = members
721 
722  def __add__(self, rhs):
723  return FilePreprocessorSequence(self.members + [rhs])
724 
725  def __call__(self, input):
726  output = input
727  for pp in self.members:
728  output = pp(output)
729  return output
730 
731 
732 class LineSkipper(FilePreprocessor):
733
734  def __init__(self, strings=[], regexps=[]):
735  import re
736  self.strings = strings
737  self.regexps = map(re.compile, regexps)
738 
739  def __processLine__(self, line):
740  for s in self.strings:
741  if line.find(s) >= 0:
742  return None
743  for r in self.regexps:
744  if r.search(line):
745  return None
746  return line
747 
748 
749 class BlockSkipper(FilePreprocessor):
750
751  def __init__(self, start, end):
752  self.start = start
753  self.end = end
754  self._skipping = False
755 
756  def __processLine__(self, line):
757  if self.start in line:
758  self._skipping = True
759  return None
760  elif self.end in line:
761  self._skipping = False
762  elif self._skipping:
763  return None
764  return line
765 
766 
767 class RegexpReplacer(FilePreprocessor):
768
769  def __init__(self, orig, repl="", when=None):
770  if when:
771  when = re.compile(when)
772  self._operations = [(when, re.compile(orig), repl)]
773 
774  def __add__(self, rhs):
775  if isinstance(rhs, RegexpReplacer):
776  res = RegexpReplacer("", "", None)
777  res._operations = self._operations + rhs._operations
778  else:
779  res = FilePreprocessor.__add__(self, rhs)
780  return res
781 
782  def __processLine__(self, line):
783  for w, o, r in self._operations:
784  if w is None or w.search(line):
785  line = o.sub(r, line)
786  return line
787 
788 
789 # Common preprocessors
790 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
791 normalizeDate = RegexpReplacer(
792  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
793  "00:00:00 1970-01-01")
794 normalizeEOL = FilePreprocessor()
795 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
796 
797 skipEmptyLines = FilePreprocessor()
798 # FIXME: that's ugly
799 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
800 
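# Editor's note (illustrative sketch, not part of the original file; values are
# made up and assume the Python 2 runtime this suite targets):
#
#   >>> maskPointers('got object at 0x7f3a2c004560')
#   'got object at 0x########'
#   >>> normalizeDate('12:34:56 2018/05/03 CEST')
#   '00:00:00 1970-01-01'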
801 # Special preprocessor sorting the list of strings (whitespace separated)
802 # that follow a signature on a single line
803 
804 
805 class LineSorter(FilePreprocessor):
806
807  def __init__(self, signature):
808  self.signature = signature
809  self.siglen = len(signature)
810 
811  def __processLine__(self, line):
812  pos = line.find(self.signature)
813  if pos >= 0:
814  line = line[:(pos + self.siglen)]
815  lst = line[(pos + self.siglen):].split()
816  lst.sort()
817  line += " ".join(lst)
818  return line
819 
820 
821 class SortGroupOfLines(FilePreprocessor):
822
823  '''
824  Sort group of lines matching a regular expression
825  '''
826 
827  def __init__(self, exp):
828  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
829 
830  def __processFile__(self, lines):
831  match = self.exp.match
832  output = []
833  group = []
834  for l in lines:
835  if match(l):
836  group.append(l)
837  else:
838  if group:
839  group.sort()
840  output.extend(group)
841  group = []
842  output.append(l)
843  return output
844 
845 
846 # Preprocessors for GaudiExamples
847 normalizeExamples = maskPointers + normalizeDate
848 for w, o, r in [
849  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
850  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
851  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
852  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
853  ("^JobOptionsSvc.*options successfully read in from",
854  r"read in from .*[/\\]([^/\\]*)$", r"file \1"), # normalize path to options
855  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
856  (None, r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
857  "00000000-0000-0000-0000-000000000000"),
858  # Absorb a change in ServiceLocatorHelper
859  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
860  "ServiceLocatorHelper::service"),
861  # Remove the leading 0 in Windows' exponential format
862  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
863  # Output line changed in Gaudi v24
864  (None, r'Service reference count check:',
865  r'Looping over all active services...'),
866  # Ignore count of declared properties (anyway they are all printed)
867  (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
868  r"\1NN"),
869  ('ApplicationMgr', r'(declareMultiSvcType|addMultiSvc): ', ''),
870 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
871  normalizeExamples += RegexpReplacer(o, r, w)
872 
873 lineSkipper = LineSkipper(["//GP:",
874  "JobOptionsSvc INFO # ",
875  "JobOptionsSvc WARNING # ",
876  "Time User",
877  "Welcome to",
878  "This machine has a speed",
879  "TIME:",
880  "running on",
881  "ToolSvc.Sequenc... INFO",
882  "DataListenerSvc INFO XML written to file:",
883  "[INFO]", "[WARNING]",
884  "DEBUG No writable file catalog found which contains FID:",
885  "DEBUG Service base class initialized successfully",
886  # changed between v20 and v21
887  "DEBUG Incident timing:",
888  # introduced with patch #3487
889  # changed the level of the message from INFO to
890  # DEBUG
891  "INFO 'CnvServices':[",
892  # message removed because could be printed in constructor
893  "DEBUG 'CnvServices':[",
894  # The signal handler complains about SIGXCPU not
895  # defined on some platforms
896  'SIGXCPU',
897  ], regexps=[
898  r"^JobOptionsSvc INFO *$",
899  r"^# ", # Ignore python comments
900  # skip the message reporting the version of the root file
901  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
902  r"File '.*.xml' does not exist",
903  r"INFO Refer to dataset .* by its file ID:",
904  r"INFO Referring to dataset .* by its file ID:",
905  r"INFO Disconnect from dataset",
906  r"INFO Disconnected from dataset",
907  r"INFO Disconnected data IO:",
908  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
909  # I want to ignore the header of the unchecked StatusCode report
910  r"^StatusCodeSvc.*listing all unchecked return codes:",
911  r"^StatusCodeSvc\s*INFO\s*$",
912  r"Num\s*\|\s*Function\s*\|\s*Source Library",
913  r"^[-+]*\s*$",
914  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
915  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
916  # Hide unchecked StatusCodes from dictionaries
917  r"^ +[0-9]+ \|.*ROOT",
918  r"^ +[0-9]+ \|.*\|.*Dict",
919  # Hide success StatusCodeSvc message
920  r"StatusCodeSvc.*all StatusCode instances where checked",
921  # Hide EventLoopMgr total timing report
922  r"EventLoopMgr.*---> Loop Finished",
923  r"HiveSlimEventLo.*---> Loop Finished",
924  # Remove ROOT TTree summary table, which changes from one version to the
925  # other
926  r"^\*.*\*$",
927  # Remove Histos Summaries
928  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
929  r"^ \|",
930  r"^ ID=",
931  # Ignore added/removed properties
932  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
933  # these were missing in tools
934  r"Property(.*)'AuditRe(start|initialize)':",
935  r"Property(.*)'IsIOBound':",
936  # removed with gaudi/Gaudi!273
937  r"Property(.*)'ErrorCount(er)?':",
938  # added with gaudi/Gaudi!306
939  r"Property(.*)'Sequential':",
940  # added with gaudi/Gaudi!314
941  r"Property(.*)'FilterCircularDependencies':",
942  # removed with gaudi/Gaudi!316
943  r"Property(.*)'IsClonable':",
944  # ignore uninteresting/obsolete messages
945  r"Property update for OutputLevel : new value =",
946  r"EventLoopMgr\s*DEBUG Creating OutputStream",
947 ])
948 
949 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
950  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
951  # fix them
952  lineSkipper += LineSkipper(regexps=[
953  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
954  ])
955 
956 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
957  normalizeEOL + LineSorter("Services to release : ") +
958  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
959 
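# Editor's note (illustrative sketch, not part of the original file): the
# preprocessors compose left to right through FilePreprocessor.__add__, so a
# chunk of output is first filtered by lineSkipper, then normalized, stripped
# of empty lines and finally sorted where requested, e.g. (made-up input):
#
#   >>> normalizeExamples('ptr 0xCAFEBABE\n# a python comment')
#   'ptr 0x########\n'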
960 # --------------------- Validation functions/classes ---------------------#
961 
962 
963 class ReferenceFileValidator:
964
965  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
966  self.reffile = os.path.expandvars(reffile)
967  self.cause = cause
968  self.result_key = result_key
969  self.preproc = preproc
970 
971  def __call__(self, stdout, result):
972  causes = []
973  if os.path.isfile(self.reffile):
974  orig = open(self.reffile).xreadlines()
975  if self.preproc:
976  orig = self.preproc(orig)
977  result[self.result_key + '.preproc.orig'] = \
978  result.Quote('\n'.join(map(str.strip, orig)))
979  else:
980  orig = []
981  new = stdout.splitlines()
982  if self.preproc:
983  new = self.preproc(new)
984 
985  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
986  filterdiffs = map(lambda x: x.strip(), filter(
987  lambda x: x[0] != " ", diffs))
988  if filterdiffs:
989  result[self.result_key] = result.Quote("\n".join(filterdiffs))
990  result[self.result_key] += result.Quote("""
991  Legend:
992  -) reference file
993  +) standard output of the test""")
994  result[self.result_key + '.preproc.new'] = \
995  result.Quote('\n'.join(map(str.strip, new)))
996  causes.append(self.cause)
997  return causes
998 
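# Editor's note (illustrative sketch, not part of the original file): compare a
# test's stdout with a reference file after the standard normalization chain;
# the file name is hypothetical.
#
#   validate = ReferenceFileValidator('refs/mytest.ref', 'standard output',
#                                     'Output Diff', preproc=normalizeExamples)
#   causes = validate(stdout_text, result)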
999
1000 def findTTreeSummaries(stdout):
1001  """
1002  Scan stdout to find ROOT TTree summaries and digest them.
1003  """
1004  stars = re.compile(r"^\*+$")
1005  outlines = stdout.splitlines()
1006  nlines = len(outlines)
1007  trees = {}
1008 
1009  i = 0
1010  while i < nlines: # loop over the output
1011  # look for the beginning of a TTree summary (a row of '*')
1012  while i < nlines and not stars.match(outlines[i]):
1013  i += 1
1014  if i < nlines:
1015  tree, i = _parseTTreeSummary(outlines, i)
1016  if tree:
1017  trees[tree["Name"]] = tree
1018 
1019  return trees
1020 
1021 
1022 def cmpTreesDicts(reference, to_check, ignore=None):
1023  """
1024  Check that all the keys in reference are in to_check too, with the same value.
1025  If the value is a dict, the function is called recursively. to_check can
1026  contain more keys than reference; the extra keys are not checked.
1027  The function returns at the first difference found.
1028  """
1029  fail_keys = []
1030  # filter the keys in the reference dictionary
1031  if ignore:
1032  ignore_re = re.compile(ignore)
1033  keys = [key for key in reference if not ignore_re.match(key)]
1034  else:
1035  keys = reference.keys()
1036  # loop over the keys (not ignored) in the reference dictionary
1037  for k in keys:
1038  if k in to_check: # the key must be in the dictionary to_check
1039  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1040  # if both reference and to_check values are dictionaries,
1041  # recurse
1042  failed = fail_keys = cmpTreesDicts(
1043  reference[k], to_check[k], ignore)
1044  else:
1045  # compare the two values
1046  failed = to_check[k] != reference[k]
1047  else: # handle missing keys in the dictionary to check (i.e. failure)
1048  to_check[k] = None
1049  failed = True
1050  if failed:
1051  fail_keys.insert(0, k)
1052  break # exit from the loop at the first failure
1053  return fail_keys # return the list of keys bringing to the different values
1054 
1055 
1056 def getCmpFailingValues(reference, to_check, fail_path):
1057  c = to_check
1058  r = reference
1059  for k in fail_path:
1060  c = c.get(k, None)
1061  r = r.get(k, None)
1062  if c is None or r is None:
1063  break # one of the dictionaries is not deep enough
1064  return (fail_path, r, c)
1065 
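# Editor's note (illustrative sketch, not part of the original file; the tree
# names and counts are made up):
#
#   >>> ref = {'MyTree': {'Entries': 100}}
#   >>> out = {'MyTree': {'Entries': 99}}
#   >>> cmpTreesDicts(ref, out)
#   ['MyTree', 'Entries']
#   >>> getCmpFailingValues(ref, out, ['MyTree', 'Entries'])
#   (['MyTree', 'Entries'], 100, 99)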
1066 
1067 # signature of the print-out of the histograms
1068 h_count_re = re.compile(
1069  r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1070 
1071 
1072 def _parseTTreeSummary(lines, pos):
1073  """
1074  Parse the TTree summary table in lines, starting from pos.
1075  Returns a tuple with the dictionary of the digested information and the
1076  position of the first line after the summary.
1077  """
1078  result = {}
1079  i = pos + 1 # first line is a sequence of '*'
1080  count = len(lines)
1081 
1082  def splitcols(l): return [f.strip() for f in l.strip("*\n").split(':', 2)]
1083 
1084  def parseblock(ll):
1085  r = {}
1086  cols = splitcols(ll[0])
1087  r["Name"], r["Title"] = cols[1:]
1088 
1089  cols = splitcols(ll[1])
1090  r["Entries"] = int(cols[1])
1091 
1092  sizes = cols[2].split()
1093  r["Total size"] = int(sizes[2])
1094  if sizes[-1] == "memory":
1095  r["File size"] = 0
1096  else:
1097  r["File size"] = int(sizes[-1])
1098 
1099  cols = splitcols(ll[2])
1100  sizes = cols[2].split()
1101  if cols[0] == "Baskets":
1102  r["Baskets"] = int(cols[1])
1103  r["Basket size"] = int(sizes[2])
1104  r["Compression"] = float(sizes[-1])
1105  return r
1106 
1107  if i < (count - 3) and lines[i].startswith("*Tree"):
1108  result = parseblock(lines[i:i + 3])
1109  result["Branches"] = {}
1110  i += 4
1111  while i < (count - 3) and lines[i].startswith("*Br"):
1112  if i < (count - 2) and lines[i].startswith("*Branch "):
1113  # skip branch header
1114  i += 3
1115  continue
1116  branch = parseblock(lines[i:i + 3])
1117  result["Branches"][branch["Name"]] = branch
1118  i += 4
1119 
1120  return (result, i)
1121 
1122 
1123 def parseHistosSummary(lines, pos):
1124  """
1125  Extract the histogram information from the lines starting at pos.
1126  Returns the summaries dictionary and the position of the first line after the block.
1127  """
1128  global h_count_re
1129  h_table_head = re.compile(
1130  r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1131  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1132 
1133  nlines = len(lines)
1134 
1135  # decode header
1136  m = h_count_re.search(lines[pos])
1137  name = m.group(1).strip()
1138  total = int(m.group(2))
1139  header = {}
1140  for k, v in [x.split("=") for x in m.group(3).split()]:
1141  header[k] = int(v)
1142  pos += 1
1143  header["Total"] = total
1144 
1145  summ = {}
1146  while pos < nlines:
1147  m = h_table_head.search(lines[pos])
1148  if m:
1149  t, d = m.groups(1) # type and directory
1150  t = t.replace(" profile", "Prof")
1151  pos += 1
1152  if pos < nlines:
1153  l = lines[pos]
1154  else:
1155  l = ""
1156  cont = {}
1157  if l.startswith(" | ID"):
1158  # table format
1159  titles = [x.strip() for x in l.split("|")][1:]
1160  pos += 1
1161  while pos < nlines and lines[pos].startswith(" |"):
1162  l = lines[pos]
1163  values = [x.strip() for x in l.split("|")][1:]
1164  hcont = {}
1165  for i in range(len(titles)):
1166  hcont[titles[i]] = values[i]
1167  cont[hcont["ID"]] = hcont
1168  pos += 1
1169  elif l.startswith(" ID="):
1170  while pos < nlines and lines[pos].startswith(" ID="):
1171  values = [x.strip()
1172  for x in h_short_summ.search(lines[pos]).groups()]
1173  cont[values[0]] = values
1174  pos += 1
1175  else: # not interpreted
1176  raise RuntimeError(
1177  "Cannot understand line %d: '%s'" % (pos, l))
1178  if not d in summ:
1179  summ[d] = {}
1180  summ[d][t] = cont
1181  summ[d]["header"] = header
1182  else:
1183  break
1184  if not summ:
1185  # If the full table is not present, we use only the header
1186  summ[name] = {"header": header}
1187  return summ, pos
1188 
1189
1190 def findHistosSummaries(stdout):
1191  """
1192  Scan stdout to find histogram summaries and digest them.
1193  """
1194  outlines = stdout.splitlines()
1195  nlines = len(outlines) - 1
1196  summaries = {}
1197  global h_count_re
1198 
1199  pos = 0
1200  while pos < nlines:
1201  summ = {}
1202  # find first line of block:
1203  match = h_count_re.search(outlines[pos])
1204  while pos < nlines and not match:
1205  pos += 1
1206  match = h_count_re.search(outlines[pos])
1207  if match:
1208  summ, pos = parseHistosSummary(outlines, pos)
1209  summaries.update(summ)
1210  return summaries
1211 
1212 
1213 def PlatformIsNotSupported(self, context, result):
1214  platform = GetPlatform(self)
1215  unsupported = [re.compile(x) for x in [str(y).strip()
1216  for y in self.unsupported_platforms] if x]
1217  for p_re in unsupported:
1218  if p_re.search(platform):
1219  result.SetOutcome(result.UNTESTED)
1220  result[result.CAUSE] = 'Platform not supported.'
1221  return True
1222  return False
1223 
1224 
1225 def GetPlatform(self):
1226  """
1227  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1228  """
1229  arch = "None"
1230  # check architecture name
1231  if "BINARY_TAG" in os.environ:
1232  arch = os.environ["BINARY_TAG"]
1233  elif "CMTCONFIG" in os.environ:
1234  arch = os.environ["CMTCONFIG"]
1235  elif "SCRAM_ARCH" in os.environ:
1236  arch = os.environ["SCRAM_ARCH"]
1237  return arch
1238 
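# Editor's note (illustrative sketch, not part of the original file; the tag is
# made up):
#
#   >>> os.environ['BINARY_TAG'] = 'x86_64-slc6-gcc62-opt'
#   >>> GetPlatform(None)   # 'self' is not used by this helper
#   'x86_64-slc6-gcc62-opt'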
1239 
1240 def isWinPlatform(self):
1241  """
1242  Return True if the current platform is Windows.
1243 
1244  This function was needed because of the change in the CMTCONFIG format,
1245  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1246  """
1247  platform = GetPlatform(self)
1248  return "winxp" in platform or platform.startswith("win")