The Gaudi Framework v30r4 (9b837755)
BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 
17 def sanitize_for_xml(data):
18  '''
19  Take a string containing characters that are invalid in XML text and quote
20  them so that the string can be safely embedded in an XML document.
21 
22  >>> sanitize_for_xml('this is \x1b')
23  'this is [NON-XML-CHAR-0x1B]'
24  '''
25  bad_chars = re.compile(
26  u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
27 
28  def quote(match):
29  'helper function'
30  return ''.join('[NON-XML-CHAR-0x%02X]' % ord(c) for c in match.group())
31  return bad_chars.sub(quote, data)
32 
33 
34 def dumpProcs(name):
35  '''helper to debug GAUDI-1084, dump the list of processes'''
36  from getpass import getuser
37  if 'WORKSPACE' in os.environ:
38  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
39  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
40  f.write(p.communicate()[0])
41 
42 
43 def kill_tree(ppid, sig):
44  '''
45  Send a signal to a process and all its child processes (starting from the
46  leaves).
47  '''
48  log = logging.getLogger('kill_tree')
49  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
50  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
51  children = map(int, get_children.communicate()[0].split())
52  for child in children:
53  kill_tree(child, sig)
54  try:
55  log.debug('killing process %d', ppid)
56  os.kill(ppid, sig)
57  except OSError, err:
58  if err.errno != 3: # No such process
59  raise
60  log.debug('no such process %d', ppid)
61 
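# Usage sketch (hypothetical PID): ask a whole process tree to terminate,
# then escalate to SIGKILL if it is still alive.
#
#   kill_tree(12345, signal.SIGTERM)
#   kill_tree(12345, signal.SIGKILL)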
62 # -------------------------------------------------------------------------#
63 
64 
65 class BaseTest(object):
66 
67  _common_tmpdir = None
68 
69  def __init__(self):
70  self.program = ''
71  self.args = []
72  self.reference = ''
73  self.error_reference = ''
74  self.options = ''
75  self.stderr = ''
76  self.timeout = 600
77  self.exit_code = None
78  self.environment = None
79  self.unsupported_platforms = []
80  self.signal = None
81  self.workdir = os.curdir
82  self.use_temp_dir = False
83  # Variables not for users
84  self.status = None
85  self.name = ''
86  self.causes = []
87  self.result = Result(self)
88  self.returnedCode = 0
89  self.out = ''
90  self.err = ''
91  self.proc = None
92  self.stack_trace = None
93  self.basedir = os.getcwd()
94 
95  def run(self):
96  logging.debug('running test %s', self.name)
97 
98  if self.options:
99  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
100  r'from\s+Configurables\s+import', self.options):
101  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
102  else:
103  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
104  optionFile.file.write(self.options)
105  optionFile.seek(0)
106  self.args.append(RationalizePath(optionFile.name))
107 
108  # If no environment was specified, use the current one; otherwise extend it
109  if self.environment is None:
110  self.environment = os.environ
111  else:
112  self.environment = dict(
113  self.environment.items() + os.environ.items())
114 
115  platform_id = (os.environ.get('BINARY_TAG') or
116  os.environ.get('CMTCONFIG') or
117  platform.platform())
118  # If at least one regex matches we skip the test.
119  skip_test = bool([None
120  for prex in self.unsupported_platforms
121  if re.search(prex, platform_id)])
122 
123  if not skip_test:
124  # handle working/temporary directory options
125  workdir = self.workdir
126  if self.use_temp_dir:
127  if self._common_tmpdir:
128  workdir = self._common_tmpdir
129  else:
130  workdir = tempfile.mkdtemp()
131 
132  # prepare the command to execute
133  prog = ''
134  if self.program != '':
135  prog = self.program
136  elif "GAUDIEXE" in os.environ:
137  prog = os.environ["GAUDIEXE"]
138  else:
139  prog = "Gaudi.exe"
140 
141  dummy, prog_ext = os.path.splitext(prog)
142  if prog_ext not in [".exe", ".py", ".bat"]:
143  prog += ".exe"
144  prog_ext = ".exe"
145 
146  prog = which(prog) or prog
147 
148  args = map(RationalizePath, self.args)
149 
150  if prog_ext == ".py":
151  params = ['python', RationalizePath(prog)] + args
152  else:
153  params = [RationalizePath(prog)] + args
154 
155  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
156  'RESOURCE': None, 'TARGET': None,
157  'TRACEBACK': None, 'START_TIME': None,
158  'END_TIME': None, 'TIMEOUT_DETAIL': None})
159  self.result = validatorRes
160 
161  # we need to switch directory because the validator expects to run
162  # in the same dir as the program
163  os.chdir(workdir)
164 
165  # launching test in a different thread to handle timeout exception
166  def target():
167  logging.debug('executing %r in %s',
168  params, workdir)
169  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
170  env=self.environment)
171  logging.debug('(pid: %d)', self.proc.pid)
172  self.out, self.err = self.proc.communicate()
173 
174  thread = threading.Thread(target=target)
175  thread.start()
176  # catching timeout
177  thread.join(self.timeout)
178 
179  if thread.is_alive():
180  logging.debug('time out in test %s (pid %d)',
181  self.name, self.proc.pid)
182  # get the stack trace of the stuck process
183  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
184  '--eval-command=thread apply all backtrace']
185  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
186  self.stack_trace = gdb.communicate()[0]
187 
188  kill_tree(self.proc.pid, signal.SIGTERM)
189  thread.join(60)
190  if thread.is_alive():
191  kill_tree(self.proc.pid, signal.SIGKILL)
192  self.causes.append('timeout')
193  else:
194  logging.debug('completed test %s', self.name)
195 
196  # Getting the error code
197  logging.debug('returnedCode = %s', self.proc.returncode)
198  self.returnedCode = self.proc.returncode
199 
200  logging.debug('validating test...')
201  self.result, self.causes = self.ValidateOutput(stdout=self.out,
202  stderr=self.err,
203  result=validatorRes)
204 
205  # remove the temporary directory if we created it
206  if self.use_temp_dir and not self._common_tmpdir:
207  shutil.rmtree(workdir, True)
208 
209  os.chdir(self.basedir)
210 
211  # handle application exit code
212  if self.signal is not None:
213  if int(self.returnedCode) != -int(self.signal):
214  self.causes.append('exit code')
215 
216  elif self.exit_code is not None:
217  if int(self.returnedCode) != int(self.exit_code):
218  self.causes.append('exit code')
219 
220  elif self.returnedCode != 0:
221  self.causes.append("exit code")
222 
223  if self.causes:
224  self.status = "failed"
225  else:
226  self.status = "passed"
227 
228  else:
229  self.status = "skipped"
230 
231  logging.debug('%s: %s', self.name, self.status)
232  field_mapping = {'Exit Code': 'returnedCode',
233  'stderr': 'err',
234  'Arguments': 'args',
235  'Environment': 'environment',
236  'Status': 'status',
237  'stdout': 'out',
238  'Program Name': 'program',
239  'Name': 'name',
240  'Validator': 'validator',
241  'Output Reference File': 'reference',
242  'Error Reference File': 'error_reference',
243  'Causes': 'causes',
244  # 'Validator Result': 'result.annotations',
245  'Unsupported Platforms': 'unsupported_platforms',
246  'Stack Trace': 'stack_trace'}
247  resultDict = [(key, getattr(self, attr))
248  for key, attr in field_mapping.iteritems()
249  if getattr(self, attr)]
250  resultDict.append(('Working Directory',
251  RationalizePath(os.path.join(os.getcwd(),
252  self.workdir))))
253  # print dict(resultDict).keys()
254  resultDict.extend(self.result.annotations.iteritems())
255  # print self.result.annotations.keys()
256  return dict(resultDict)
257 
258  # -------------------------------------------------#
259  # ----------------Validating tool------------------#
260  # -------------------------------------------------#
261 
262  def ValidateOutput(self, stdout, stderr, result):
263  if not self.stderr:
264  self.validateWithReference(stdout, stderr, result, self.causes)
265  elif stderr.strip() != self.stderr.strip():
266  self.causes.append('standard error')
267  return result, self.causes
268 
269  def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None):
270  """
271  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default the first line of the block is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed explicitly via the 'signature' argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in the list of block lines (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
272  """
273 
274  if reference is None:
275  reference = self.reference
276  if stdout is None:
277  stdout = self.out
278  if result is None:
279  result = self.result
280  if causes is None:
281  causes = self.causes
282 
283  reflines = filter(
284  None, map(lambda s: s.rstrip(), reference.splitlines()))
285  if not reflines:
286  raise RuntimeError("Empty (or null) reference")
287  # the same on standard output
288  outlines = filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))
289 
290  res_field = "GaudiTest.RefBlock"
291  if id:
292  res_field += "_%s" % id
293 
294  if signature is None:
295  if signature_offset < 0:
296  signature_offset = len(reflines) + signature_offset
297  signature = reflines[signature_offset]
298  # find the reference block in the output file
299  try:
300  pos = outlines.index(signature)
301  outlines = outlines[pos - signature_offset:pos +
302  len(reflines) - signature_offset]
303  if reflines != outlines:
304  msg = "standard output"
305  # I do not want 2 messages in causes if the function is called
306  # twice
307  if not msg in causes:
308  causes.append(msg)
309  result[res_field +
310  ".observed"] = result.Quote("\n".join(outlines))
311  except ValueError:
312  causes.append("missing signature")
313  result[res_field + ".signature"] = result.Quote(signature)
314  if len(reflines) > 1 or signature != reflines[0]:
315  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
316  return causes
317 
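# Usage sketch ('test' is a hypothetical BaseTest instance, the block text is
# made up): look for a three-line block in the captured stdout, using its
# second line as the signature.
#
#   block = "first line\nUNIQUE SIGNATURE\nlast line"
#   test.findReferenceBlock(reference=block, signature_offset=1, id='block1')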
318  def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None, result=None, causes=None):
319  """
320  Count the number of messages with the required severities (by default ERROR and FATAL)
321  and check whether their numbers match the expected ones (0 by default).
322  The dictionary "expected" can be used to tune the number of ERROR and FATAL
323  messages allowed, or to count other severities (e.g. WARNING) as well.
324  """
325 
326  if stdout is None:
327  stdout = self.out
328  if result is None:
329  result = self.result
330  if causes is None:
331  causes = self.causes
332 
333  # prepare the dictionary to record the extracted lines
334  errors = {}
335  for sev in expected:
336  errors[sev] = []
337 
338  outlines = stdout.splitlines()
339  from math import log10
340  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
341 
342  linecount = 0
343  for l in outlines:
344  linecount += 1
345  words = l.split()
346  if len(words) >= 2 and words[1] in errors:
347  errors[words[1]].append(fmt % (linecount, l.rstrip()))
348 
349  for e in errors:
350  if len(errors[e]) != expected[e]:
351  causes.append('%s(%d)' % (e, len(errors[e])))
352  result["GaudiTest.lines.%s" %
353  e] = result.Quote('\n'.join(errors[e]))
354  result["GaudiTest.lines.%s.expected#" %
355  e] = result.Quote(str(expected[e]))
356 
357  return causes
358 
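# Usage sketch ('test' is a hypothetical BaseTest instance): allow exactly one
# ERROR message and no FATAL messages in the captured stdout.
#
#   test.countErrorLines(expected={'ERROR': 1, 'FATAL': 0})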
359  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
360  trees_dict=None,
361  ignore=r"Basket|.*size|Compression"):
362  """
363  Compare the TTree summaries in stdout with the ones in trees_dict or in
364  the reference file. By default ignore the size, compression and basket
365  fields.
366  The presence of TTree summaries when none is expected is not a failure.
367  """
368  if stdout is None:
369  stdout = self.out
370  if result is None:
371  result = self.result
372  if causes is None:
373  causes = self.causes
374  if trees_dict is None:
375  lreference = self._expandReferenceFileName(self.reference)
376  # call the validator if the file exists
377  if lreference and os.path.isfile(lreference):
378  trees_dict = findTTreeSummaries(open(lreference).read())
379  else:
380  trees_dict = {}
381 
382  from pprint import PrettyPrinter
383  pp = PrettyPrinter()
384  if trees_dict:
385  result["GaudiTest.TTrees.expected"] = result.Quote(
386  pp.pformat(trees_dict))
387  if ignore:
388  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
389 
390  trees = findTTreeSummaries(stdout)
391  failed = cmpTreesDicts(trees_dict, trees, ignore)
392  if failed:
393  causes.append("trees summaries")
394  msg = "%s: %s != %s" % getCmpFailingValues(
395  trees_dict, trees, failed)
396  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
397  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
398 
399  return causes
400 
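# Usage sketch ('test' and the tree name are made up): compare the TTree
# summaries found in stdout against an explicit expectation instead of the
# reference file; only the keys given in trees_dict are checked.
#
#   test.CheckTTreesSummaries(trees_dict={'MyTree': {'Entries': 100}})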
401  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
402  dict=None,
403  ignore=None):
404  """
405  Compare the histogram summaries in stdout with the ones in dict or in
406  the reference file. By default no field is ignored ('ignore' can be used
407  to exclude some).
408  The presence of histogram summaries when none is expected is not a failure.
409  """
410  if stdout is None:
411  stdout = self.out
412  if result is None:
413  result = self.result
414  if causes is None:
415  causes = self.causes
416 
417  if dict is None:
418  lreference = self._expandReferenceFileName(self.reference)
419  # call the validator if the file exists
420  if lreference and os.path.isfile(lreference):
421  dict = findHistosSummaries(open(lreference).read())
422  else:
423  dict = {}
424 
425  from pprint import PrettyPrinter
426  pp = PrettyPrinter()
427  if dict:
428  result["GaudiTest.Histos.expected"] = result.Quote(
429  pp.pformat(dict))
430  if ignore:
431  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
432 
433  histos = findHistosSummaries(stdout)
434  failed = cmpTreesDicts(dict, histos, ignore)
435  if failed:
436  causes.append("histos summaries")
437  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
438  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
439  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
440 
441  return causes
442 
443  def validateWithReference(self, stdout=None, stderr=None, result=None,
444  causes=None, preproc=None):
445  '''
446  Default validation action: compare standard output and error to the
447  reference files.
448  '''
449 
450  if stdout is None:
451  stdout = self.out
452  if stderr is None:
453  stderr = self.err
454  if result is None:
455  result = self.result
456  if causes is None:
457  causes = self.causes
458 
459  # set the default output preprocessor
460  if preproc is None:
461  preproc = normalizeExamples
462  # check standard output
463  lreference = self._expandReferenceFileName(self.reference)
464  # call the validator if the file exists
465  if lreference and os.path.isfile(lreference):
466  causes += ReferenceFileValidator(lreference,
467  "standard output",
468  "Output Diff",
469  preproc=preproc)(stdout, result)
470  elif lreference:
471  causes += ["missing reference file"]
472  # Compare TTree summaries
473  causes = self.CheckTTreesSummaries(stdout, result, causes)
474  causes = self.CheckHistosSummaries(stdout, result, causes)
475  if causes: # Write a new reference file for stdout
476  try:
477  newref = open(lreference + ".new", "w")
478  # sanitize newlines
479  for l in stdout.splitlines():
480  newref.write(l.rstrip() + '\n')
481  del newref # flush and close
482  except IOError:
483  # Ignore IO errors when trying to update reference files
484  # because we may be in a read-only filesystem
485  pass
486 
487  # check standard error
488  lreference = self._expandReferenceFileName(self.error_reference)
489  # call the validator if we have a file to use
490  if lreference and os.path.isfile(lreference):
491  newcauses = ReferenceFileValidator(lreference,
492  "standard error",
493  "Error Diff",
494  preproc=preproc)(stderr, result)
495  causes += newcauses
496  if newcauses: # Write a new reference file for stderr
497  newref = open(lreference + ".new", "w")
498  # sanitize newlines
499  for l in stderr.splitlines():
500  newref.write(l.rstrip() + '\n')
501  del newref # flush and close
502  else:
503  causes += BasicOutputValidator(lreference, "standard error",
504  "ExecTest.expected_stderr")(stderr, result)
505  return causes
506 
507  def _expandReferenceFileName(self, reffile):
508  # if no file is passed, do nothing
509  if not reffile:
510  return ""
511 
512  # function to split an extension into its constituent parts
513  def platformSplit(p):
514  import re
515  delim = re.compile('-' in p and r"[-+]" or r"_")
516  return set(delim.split(p))
517 
518  reference = os.path.normpath(os.path.join(self.basedir,
519  os.path.expandvars(reffile)))
520 
521  # old-style platform-specific reference name
522  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
523  if os.path.isfile(spec_ref):
524  reference = spec_ref
525  else: # look for new-style platform specific reference files:
526  # get all the files whose name start with the reference filename
527  dirname, basename = os.path.split(reference)
528  if not dirname:
529  dirname = '.'
530  head = basename + "."
531  head_len = len(head)
532  platform = platformSplit(GetPlatform(self))
533  if 'do0' in platform:
534  platform.add('dbg')
535  candidates = []
536  for f in os.listdir(dirname):
537  if f.startswith(head):
538  req_plat = platformSplit(f[head_len:])
539  if platform.issuperset(req_plat):
540  candidates.append((len(req_plat), f))
541  if candidates: # take the one with highest matching
542  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
543  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
544  candidates.sort()
545  reference = os.path.join(dirname, candidates[-1][1])
546  return reference
547 
548 # ======= GAUDI TOOLS =======
549 
550 
551 import shutil
552 import string
553 import difflib
554 import calendar
555 
556 try:
557  from GaudiKernel import ROOT6WorkAroundEnabled
558 except ImportError:
559  def ROOT6WorkAroundEnabled(id=None):
560  # dummy implementation
561  return False
562 
563 # --------------------------------- TOOLS ---------------------------------#
564 
565 
566 def RationalizePath(p):
567  """
568  Normalize a path: expand environment variables and resolve it to the real path if it exists.
569  """
570  newPath = os.path.normpath(os.path.expandvars(p))
571  if os.path.exists(newPath):
572  p = os.path.realpath(newPath)
573  return p
574 
575 
576 def which(executable):
577  """
578  Locates an executable in the executables path ($PATH) and returns the full
579  path to it. An application is looked for with or without the '.exe' suffix.
580  If the executable cannot be found, None is returned
581  """
582  if os.path.isabs(executable):
583  if not os.path.exists(executable):
584  if executable.endswith('.exe'):
585  if os.path.exists(executable[:-4]):
586  return executable[:-4]
587  else:
588  head, executable = os.path.split(executable)
589  else:
590  return executable
591  for d in os.environ.get("PATH").split(os.pathsep):
592  fullpath = os.path.join(d, executable)
593  if os.path.exists(fullpath):
594  return fullpath
595  if executable.endswith('.exe'):
596  return which(executable[:-4])
597  return None
598 
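# Usage sketch: 'which' searches $PATH for the given name and, if a name
# ending in '.exe' cannot be found, retries without the suffix; it returns the
# full path or None.
#
#   which('Gaudi.exe')   # falls back to looking for 'Gaudi' on $PATH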
599 
600 # -------------------------------------------------------------------------#
601 # ------------------------------ Result Class -----------------------------#
602 # -------------------------------------------------------------------------#
603 import types
604 
605 
606 class Result:
607 
608  PASS = 'PASS'
609  FAIL = 'FAIL'
610  ERROR = 'ERROR'
611  UNTESTED = 'UNTESTED'
612 
613  EXCEPTION = ""
614  RESOURCE = ""
615  TARGET = ""
616  TRACEBACK = ""
617  START_TIME = ""
618  END_TIME = ""
619  TIMEOUT_DETAIL = ""
620 
621  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
622  self.annotations = annotations.copy()
623 
624  def __getitem__(self, key):
625  assert type(key) in types.StringTypes
626  return self.annotations[key]
627 
628  def __setitem__(self, key, value):
629  assert type(key) in types.StringTypes
630  assert type(value) in types.StringTypes
631  self.annotations[key] = value
632 
633  def Quote(self, string):
634  return string
635 
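# Usage sketch (the annotation key is made up): Result behaves like a
# string-to-string annotation store, with values passed through Quote
# (a no-op here) before being attached to the test result.
#
#   r = Result()
#   r['GaudiTest.Comment'] = r.Quote('an illustrative annotation')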
636 
637 # -------------------------------------------------------------------------#
638 # --------------------------- Validator Classes ---------------------------#
639 # -------------------------------------------------------------------------#
640 
641 # Basic implementation of an option validator for Gaudi test. This
642 # implementation is based on the standard (LCG) validation functions used
643 # in QMTest.
644 
645 
646 class BasicOutputValidator:
647
648  def __init__(self, ref, cause, result_key):
649  self.ref = ref
650  self.cause = cause
651  self.result_key = result_key
652 
653  def __call__(self, out, result):
654  """Validate the output of the program.
655  'stdout' -- A string containing the data written to the standard output
656  stream.
657  'stderr' -- A string containing the data written to the standard error
658  stream.
659  'result' -- A 'Result' object. It may be used to annotate
660  the outcome according to the content of stderr.
661  returns -- A list of strings giving causes of failure."""
662 
663  causes = []
664  # Check the output
665  if not self.__CompareText(out, self.ref):
666  causes.append(self.cause)
667  result[self.result_key] = result.Quote(self.ref)
668 
669  return causes
670 
671  def __CompareText(self, s1, s2):
672  """Compare 's1' and 's2', ignoring line endings.
673  's1' -- A string.
674  's2' -- A string.
675  returns -- True if 's1' and 's2' are the same, ignoring
676  differences in line endings."""
677  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
678  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
679  # can fix them
680  to_ignore = re.compile(
681  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
682 
683  def keep_line(l): return not to_ignore.match(l)
684  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
685  else:
686  return s1.splitlines() == s2.splitlines()
687 
688 
689 # ------------------------ Preprocessor elements ------------------------#
690 class FilePreprocessor:
691
692  """ Base class for a callable that takes a file and returns a modified
693  version of it."""
694 
695  def __processLine__(self, line):
696  return line
697 
698  def __processFile__(self, lines):
699  output = []
700  for l in lines:
701  l = self.__processLine__(l)
702  if l:
703  output.append(l)
704  return output
705 
706  def __call__(self, input):
707  if hasattr(input, "__iter__"):
708  lines = input
709  mergeback = False
710  else:
711  lines = input.splitlines()
712  mergeback = True
713  output = self.__processFile__(lines)
714  if mergeback:
715  output = '\n'.join(output)
716  return output
717 
718  def __add__(self, rhs):
719  return FilePreprocessorSequence([self, rhs])
720 
721 
722 class FilePreprocessorSequence(FilePreprocessor):
723
724  def __init__(self, members=[]):
725  self.members = members
726 
727  def __add__(self, rhs):
728  return FilePreprocessorSequence(self.members + [rhs])
729 
730  def __call__(self, input):
731  output = input
732  for pp in self.members:
733  output = pp(output)
734  return output
735 
736 
737 class LineSkipper(FilePreprocessor):
738
739  def __init__(self, strings=[], regexps=[]):
740  import re
741  self.strings = strings
742  self.regexps = map(re.compile, regexps)
743 
744  def __processLine__(self, line):
745  for s in self.strings:
746  if line.find(s) >= 0:
747  return None
748  for r in self.regexps:
749  if r.search(line):
750  return None
751  return line
752 
753 
754 class BlockSkipper(FilePreprocessor):
755
756  def __init__(self, start, end):
757  self.start = start
758  self.end = end
759  self._skipping = False
760 
761  def __processLine__(self, line):
762  if self.start in line:
763  self._skipping = True
764  return None
765  elif self.end in line:
766  self._skipping = False
767  elif self._skipping:
768  return None
769  return line
770 
771 
772 class RegexpReplacer(FilePreprocessor):
773
774  def __init__(self, orig, repl="", when=None):
775  if when:
776  when = re.compile(when)
777  self._operations = [(when, re.compile(orig), repl)]
778 
779  def __add__(self, rhs):
780  if isinstance(rhs, RegexpReplacer):
781  res = RegexpReplacer("", "", None)
782  res._operations = self._operations + rhs._operations
783  else:
784  res = FilePreprocessor.__add__(self, rhs)
785  return res
786 
787  def __processLine__(self, line):
788  for w, o, r in self._operations:
789  if w is None or w.search(line):
790  line = o.sub(r, line)
791  return line
792 
793 
794 # Common preprocessors
795 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
796 normalizeDate = RegexpReplacer(
797  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
798  "00:00:00 1970-01-01")
799 normalizeEOL = FilePreprocessor()
800 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
801 
802 skipEmptyLines = FilePreprocessor()
803 # FIXME: that's ugly
804 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
805 
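# Usage sketch (made-up input line): preprocessors compose with '+' and are
# callable on a whole string or on a list of lines, e.g.
#
#   (maskPointers + normalizeDate)('created at 0x7fa123456789 on 12:34:56 2017-06-01')
#
# returns 'created at 0x######## on 00:00:00 1970-01-01'.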
806 # Special preprocessor sorting the list of strings (whitespace separated)
807 # that follow a signature on a single line
808 
809 
810 class LineSorter(FilePreprocessor):
811
812  def __init__(self, signature):
813  self.signature = signature
814  self.siglen = len(signature)
815 
816  def __processLine__(self, line):
817  pos = line.find(self.signature)
818  if pos >= 0:
819  line = line[:(pos + self.siglen)]
820  lst = line[(pos + self.siglen):].split()
821  lst.sort()
822  line += " ".join(lst)
823  return line
824 
825 
826 class SortGroupOfLines(FilePreprocessor):
827
828  '''
829  Sort group of lines matching a regular expression
830  '''
831 
832  def __init__(self, exp):
833  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
834 
835  def __processFile__(self, lines):
836  match = self.exp.match
837  output = []
838  group = []
839  for l in lines:
840  if match(l):
841  group.append(l)
842  else:
843  if group:
844  group.sort()
845  output.extend(group)
846  group = []
847  output.append(l)
848  return output
849 
850 
851 # Preprocessors for GaudiExamples
852 normalizeExamples = maskPointers + normalizeDate
853 for w, o, r in [
854  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
855  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
856  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
857  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
858  ("^JobOptionsSvc.*options successfully read in from",
859  r"read in from .*[/\\]([^/\\]*)$", r"file \1"), # normalize path to options
860  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
861  (None, r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
862  "00000000-0000-0000-0000-000000000000"),
863  # Absorb a change in ServiceLocatorHelper
864  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
865  "ServiceLocatorHelper::service"),
866  # Remove the leading 0 in Windows' exponential format
867  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
868  # Output line changed in Gaudi v24
869  (None, r'Service reference count check:',
870  r'Looping over all active services...'),
871  # Ignore count of declared properties (anyway they are all printed)
872  (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
873  r"\1NN"),
874  ('ApplicationMgr', r'(declareMultiSvcType|addMultiSvc): ', ''),
875 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
876  normalizeExamples += RegexpReplacer(o, r, w)
877 
878 lineSkipper = LineSkipper(["//GP:",
879  "JobOptionsSvc INFO # ",
880  "JobOptionsSvc WARNING # ",
881  "Time User",
882  "Welcome to",
883  "This machine has a speed",
884  "TIME:",
885  "running on",
886  "ToolSvc.Sequenc... INFO",
887  "DataListenerSvc INFO XML written to file:",
888  "[INFO]", "[WARNING]",
889  "DEBUG No writable file catalog found which contains FID:",
890  "DEBUG Service base class initialized successfully",
891  # changed between v20 and v21
892  "DEBUG Incident timing:",
893  # introduced with patch #3487
894  # changed the level of the message from INFO to
895  # DEBUG
896  "INFO 'CnvServices':[",
897  # message removed because could be printed in constructor
898  "DEBUG 'CnvServices':[",
899  # The signal handler complains about SIGXCPU not
900  # defined on some platforms
901  'SIGXCPU',
902  ], regexps=[
903  r"^JobOptionsSvc INFO *$",
904  r"^# ", # Ignore python comments
905  # skip the message reporting the version of the root file
906  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
907  r"File '.*.xml' does not exist",
908  r"INFO Refer to dataset .* by its file ID:",
909  r"INFO Referring to dataset .* by its file ID:",
910  r"INFO Disconnect from dataset",
911  r"INFO Disconnected from dataset",
912  r"INFO Disconnected data IO:",
913  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
914  # I want to ignore the header of the unchecked StatusCode report
915  r"^StatusCodeSvc.*listing all unchecked return codes:",
916  r"^StatusCodeSvc\s*INFO\s*$",
917  r"Num\s*\|\s*Function\s*\|\s*Source Library",
918  r"^[-+]*\s*$",
919  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
920  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
921  # Hide unchecked StatusCodes from dictionaries
922  r"^ +[0-9]+ \|.*ROOT",
923  r"^ +[0-9]+ \|.*\|.*Dict",
924  # Hide success StatusCodeSvc message
925  r"StatusCodeSvc.*all StatusCode instances where checked",
926  # Hide EventLoopMgr total timing report
927  r"EventLoopMgr.*---> Loop Finished",
928  r"HiveSlimEventLo.*---> Loop Finished",
929  # Remove ROOT TTree summary table, which changes from one version to the
930  # other
931  r"^\*.*\*$",
932  # Remove Histos Summaries
933  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
934  r"^ \|",
935  r"^ ID=",
936  # Ignore added/removed properties
937  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
938  # these were missing in tools
939  r"Property(.*)'AuditRe(start|initialize)':",
940  r"Property(.*)'IsIOBound':",
941  # removed with gaudi/Gaudi!273
942  r"Property(.*)'ErrorCount(er)?':",
943  # added with gaudi/Gaudi!306
944  r"Property(.*)'Sequential':",
945  # added with gaudi/Gaudi!314
946  r"Property(.*)'FilterCircularDependencies':",
947  # removed with gaudi/Gaudi!316
948  r"Property(.*)'IsClonable':",
949  # ignore uninteresting/obsolete messages
950  r"Property update for OutputLevel : new value =",
951  r"EventLoopMgr\s*DEBUG Creating OutputStream",
952 ])
953 
954 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
955  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
956  # fix them
957  lineSkipper += LineSkipper(regexps=[
958  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
959  ])
960 
961 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
962  normalizeEOL + LineSorter("Services to release : ") +
963  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
964 
965 # --------------------- Validation functions/classes ---------------------#
966 
967 
968 class ReferenceFileValidator:
969
970  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
971  self.reffile = os.path.expandvars(reffile)
972  self.cause = cause
973  self.result_key = result_key
974  self.preproc = preproc
975 
976  def __call__(self, stdout, result):
977  causes = []
978  if os.path.isfile(self.reffile):
979  orig = open(self.reffile).xreadlines()
980  if self.preproc:
981  orig = self.preproc(orig)
982  result[self.result_key + '.preproc.orig'] = \
983  result.Quote('\n'.join(map(str.strip, orig)))
984  else:
985  orig = []
986  new = stdout.splitlines()
987  if self.preproc:
988  new = self.preproc(new)
989 
990  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
991  filterdiffs = map(lambda x: x.strip(), filter(
992  lambda x: x[0] != " ", diffs))
993  if filterdiffs:
994  result[self.result_key] = result.Quote("\n".join(filterdiffs))
995  result[self.result_key] += result.Quote("""
996  Legend:
997  -) reference file
998  +) standard output of the test""")
999  result[self.result_key + '.preproc.new'] = \
1000  result.Quote('\n'.join(map(str.strip, new)))
1001  causes.append(self.cause)
1002  return causes
1003 
1004 
1005 def findTTreeSummaries(stdout):
1006  """
1007  Scan stdout to find ROOT TTree summaries and digest them.
1008  """
1009  stars = re.compile(r"^\*+$")
1010  outlines = stdout.splitlines()
1011  nlines = len(outlines)
1012  trees = {}
1013 
1014  i = 0
1015  while i < nlines: # loop over the output
1016  # look for
1017  while i < nlines and not stars.match(outlines[i]):
1018  i += 1
1019  if i < nlines:
1020  tree, i = _parseTTreeSummary(outlines, i)
1021  if tree:
1022  trees[tree["Name"]] = tree
1023 
1024  return trees
1025 
1026 
1027 def cmpTreesDicts(reference, to_check, ignore=None):
1028  """
1029  Check that all the keys in reference are in to_check too, with the same value.
1030  If the value is a dict, the function is called recursively. to_check can
1031  contain more keys than reference; the extra keys are not tested.
1032  The function returns at the first difference found.
1033  """
1034  fail_keys = []
1035  # filter the keys in the reference dictionary
1036  if ignore:
1037  ignore_re = re.compile(ignore)
1038  keys = [key for key in reference if not ignore_re.match(key)]
1039  else:
1040  keys = reference.keys()
1041  # loop over the keys (not ignored) in the reference dictionary
1042  for k in keys:
1043  if k in to_check: # the key must be in the dictionary to_check
1044  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1045  # if both reference and to_check values are dictionaries,
1046  # recurse
1047  failed = fail_keys = cmpTreesDicts(
1048  reference[k], to_check[k], ignore)
1049  else:
1050  # compare the two values
1051  failed = to_check[k] != reference[k]
1052  else: # handle missing keys in the dictionary to check (i.e. failure)
1053  to_check[k] = None
1054  failed = True
1055  if failed:
1056  fail_keys.insert(0, k)
1057  break # exit from the loop at the first failure
1058  return fail_keys # return the list of keys bringing to the different values
1059 
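# Usage sketch (made-up dictionaries): the returned list is the path of keys
# leading to the first differing value, which getCmpFailingValues (below)
# turns into a printable (path, expected, found) triple.
#
#   ref = {'MyTree': {'Entries': 100}}
#   got = {'MyTree': {'Entries': 99}}
#   cmpTreesDicts(ref, got)                               # -> ['MyTree', 'Entries']
#   getCmpFailingValues(ref, got, ['MyTree', 'Entries'])  # -> (['MyTree', 'Entries'], 100, 99)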
1060 
1061 def getCmpFailingValues(reference, to_check, fail_path):
1062  c = to_check
1063  r = reference
1064  for k in fail_path:
1065  c = c.get(k, None)
1066  r = r.get(k, None)
1067  if c is None or r is None:
1068  break # one of the dictionaries is not deep enough
1069  return (fail_path, r, c)
1070 
1071 
1072 # signature of the print-out of the histograms
1073 h_count_re = re.compile(
1074  r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1075 
1076 
1077 def _parseTTreeSummary(lines, pos):
1078  """
1079  Parse the TTree summary table in lines, starting from pos.
1080  Returns a tuple with the dictionary of the digested information and the
1081  position of the first line after the summary.
1082  """
1083  result = {}
1084  i = pos + 1 # first line is a sequence of '*'
1085  count = len(lines)
1086 
1087  def splitcols(l): return [f.strip() for f in l.strip("*\n").split(':', 2)]
1088 
1089  def parseblock(ll):
1090  r = {}
1091  cols = splitcols(ll[0])
1092  r["Name"], r["Title"] = cols[1:]
1093 
1094  cols = splitcols(ll[1])
1095  r["Entries"] = int(cols[1])
1096 
1097  sizes = cols[2].split()
1098  r["Total size"] = int(sizes[2])
1099  if sizes[-1] == "memory":
1100  r["File size"] = 0
1101  else:
1102  r["File size"] = int(sizes[-1])
1103 
1104  cols = splitcols(ll[2])
1105  sizes = cols[2].split()
1106  if cols[0] == "Baskets":
1107  r["Baskets"] = int(cols[1])
1108  r["Basket size"] = int(sizes[2])
1109  r["Compression"] = float(sizes[-1])
1110  return r
1111 
1112  if i < (count - 3) and lines[i].startswith("*Tree"):
1113  result = parseblock(lines[i:i + 3])
1114  result["Branches"] = {}
1115  i += 4
1116  while i < (count - 3) and lines[i].startswith("*Br"):
1117  if i < (count - 2) and lines[i].startswith("*Branch "):
1118  # skip branch header
1119  i += 3
1120  continue
1121  branch = parseblock(lines[i:i + 3])
1122  result["Branches"][branch["Name"]] = branch
1123  i += 4
1124 
1125  return (result, i)
1126 
1127 
1128 def parseHistosSummary(lines, pos):
1129  """
1130  Extract the histogram information from the lines starting at pos.
1131  Returns a tuple with the summary dictionary and the position of the first line after the summary block.
1132  """
1133  global h_count_re
1134  h_table_head = re.compile(
1135  r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1136  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1137 
1138  nlines = len(lines)
1139 
1140  # decode header
1141  m = h_count_re.search(lines[pos])
1142  name = m.group(1).strip()
1143  total = int(m.group(2))
1144  header = {}
1145  for k, v in [x.split("=") for x in m.group(3).split()]:
1146  header[k] = int(v)
1147  pos += 1
1148  header["Total"] = total
1149 
1150  summ = {}
1151  while pos < nlines:
1152  m = h_table_head.search(lines[pos])
1153  if m:
1154  t, d = m.groups(1) # type and directory
1155  t = t.replace(" profile", "Prof")
1156  pos += 1
1157  if pos < nlines:
1158  l = lines[pos]
1159  else:
1160  l = ""
1161  cont = {}
1162  if l.startswith(" | ID"):
1163  # table format
1164  titles = [x.strip() for x in l.split("|")][1:]
1165  pos += 1
1166  while pos < nlines and lines[pos].startswith(" |"):
1167  l = lines[pos]
1168  values = [x.strip() for x in l.split("|")][1:]
1169  hcont = {}
1170  for i in range(len(titles)):
1171  hcont[titles[i]] = values[i]
1172  cont[hcont["ID"]] = hcont
1173  pos += 1
1174  elif l.startswith(" ID="):
1175  while pos < nlines and lines[pos].startswith(" ID="):
1176  values = [x.strip()
1177  for x in h_short_summ.search(lines[pos]).groups()]
1178  cont[values[0]] = values
1179  pos += 1
1180  else: # not interpreted
1181  raise RuntimeError(
1182  "Cannot understand line %d: '%s'" % (pos, l))
1183  if not d in summ:
1184  summ[d] = {}
1185  summ[d][t] = cont
1186  summ[d]["header"] = header
1187  else:
1188  break
1189  if not summ:
1190  # If the full table is not present, we use only the header
1191  summ[name] = {"header": header}
1192  return summ, pos
1193 
1194 
1195 def findHistosSummaries(stdout):
1196  """
1197  Scan stdout to find histogram summaries and digest them.
1198  """
1199  outlines = stdout.splitlines()
1200  nlines = len(outlines) - 1
1201  summaries = {}
1202  global h_count_re
1203 
1204  pos = 0
1205  while pos < nlines:
1206  summ = {}
1207  # find first line of block:
1208  match = h_count_re.search(outlines[pos])
1209  while pos < nlines and not match:
1210  pos += 1
1211  match = h_count_re.search(outlines[pos])
1212  if match:
1213  summ, pos = parseHistosSummary(outlines, pos)
1214  summaries.update(summ)
1215  return summaries
1216 
1217 
1218 def PlatformIsNotSupported(self, context, result):
1219  platform = GetPlatform(self)
1220  unsupported = [re.compile(x) for x in [str(y).strip()
1221  for y in self.unsupported_platforms] if x]
1222  for p_re in unsupported:
1223  if p_re.search(platform):
1224  result.SetOutcome(result.UNTESTED)
1225  result[result.CAUSE] = 'Platform not supported.'
1226  return True
1227  return False
1228 
1229 
1230 def GetPlatform(self):
1231  """
1232  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1233  """
1234  arch = "None"
1235  # check architecture name
1236  if "BINARY_TAG" in os.environ:
1237  arch = os.environ["BINARY_TAG"]
1238  elif "CMTCONFIG" in os.environ:
1239  arch = os.environ["CMTCONFIG"]
1240  elif "SCRAM_ARCH" in os.environ:
1241  arch = os.environ["SCRAM_ARCH"]
1242  return arch
1243 
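# Usage sketch (hypothetical BINARY_TAG value): the first of BINARY_TAG,
# CMTCONFIG and SCRAM_ARCH that is set wins; the 'self' argument is not used.
#
#   os.environ['BINARY_TAG'] = 'x86_64-centos7-gcc7-opt'
#   GetPlatform(None)   # -> 'x86_64-centos7-gcc7-opt'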
1244 
1245 def isWinPlatform(self):
1246  """
1247  Return True if the current platform is Windows.
1248 
1249  This function was needed because of the change in the CMTCONFIG format,
1250  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1251  """
1252  platform = GetPlatform(self)
1253  return "winxp" in platform or platform.startswith("win")
Definition: BaseTest.py:633