The Gaudi Framework  v29r0 (ff2e7097)
BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 
17 def sanitize_for_xml(data):
18  '''
19  Take a string containing characters that are invalid in XML text and
20  quote them so that the string can safely be embedded in an XML document.
21 
22  >>> sanitize_for_xml('this is \x1b')
23  'this is [NON-XML-CHAR-0x1B]'
24  '''
25  bad_chars = re.compile(
26  u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
27 
28  def quote(match):
29  'helper function'
30  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
31  return bad_chars.sub(quote, data)
32 
33 
34 def dumpProcs(name):
35  '''helper to debug GAUDI-1084, dump the list of processes'''
36  from getpass import getuser
37  if 'WORKSPACE' in os.environ:
38  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
39  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
40  f.write(p.communicate()[0])
41 
42 
43 def kill_tree(ppid, sig):
44  '''
45  Send a signal to a process and all its child processes (starting from the
46  leaves).
47  '''
48  log = logging.getLogger('kill_tree')
49  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
50  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
51  children = map(int, get_children.communicate()[0].split())
52  for child in children:
53  kill_tree(child, sig)
54  try:
55  log.debug('killing process %d', ppid)
56  os.kill(ppid, sig)
57  except OSError, err:
58  if err.errno != 3: # No such process
59  raise
60  log.debug('no such process %d', ppid)
61 
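# --- Illustrative usage sketch (not part of the original BaseTest.py) ---
# kill_tree() is used by BaseTest.run() below to stop a timed-out test: the
# whole process tree first receives SIGTERM and only gets SIGKILL if it
# survives a grace period. A minimal sketch of that pattern; the helper name
# and the 'grace' parameter are made up, 'proc' is a subprocess.Popen.
def _example_stop_process_tree(proc, grace=5.0):
    kill_tree(proc.pid, signal.SIGTERM)      # polite request first
    deadline = time.time() + grace
    while proc.poll() is None and time.time() < deadline:
        time.sleep(0.1)
    if proc.poll() is None:                  # still alive: force it
        kill_tree(proc.pid, signal.SIGKILL)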
62 #-------------------------------------------------------------------------#
63 
64 
65 class BaseTest(object):
66 
67  _common_tmpdir = None
68 
69  def __init__(self):
70  self.program = ''
71  self.args = []
72  self.reference = ''
73  self.error_reference = ''
74  self.options = ''
75  self.stderr = ''
76  self.timeout = 600
77  self.exit_code = None
78  self.environment = None
79  self.unsupported_platforms = []
80  self.signal = None
81  self.workdir = os.curdir
82  self.use_temp_dir = False
83  # Variables not for users
84  self.status = None
85  self.name = ''
86  self.causes = []
87  self.result = Result(self)
88  self.returnedCode = 0
89  self.out = ''
90  self.err = ''
91  self.proc = None
92  self.stack_trace = None
93  self.basedir = os.getcwd()
94 
95  def run(self):
96  logging.debug('running test %s', self.name)
97 
98  if self.options:
99  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
100  'from\s+Configurables\s+import', self.options):
101  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
102  else:
103  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
104  optionFile.file.write(self.options)
105  optionFile.seek(0)
106  self.args.append(RationalizePath(optionFile.name))
107 
108  # If not specified, use the current environment, otherwise merge with it
109  if self.environment is None:
110  self.environment = os.environ
111  else:
112  self.environment = dict(
113  self.environment.items() + os.environ.items())
114 
115  platform_id = (os.environ.get('BINARY_TAG') or
116  os.environ.get('CMTCONFIG') or
117  platform.platform())
118  # If at least one regex matches we skip the test.
119  skip_test = bool([None
120  for prex in self.unsupported_platforms
121  if re.search(prex, platform_id)])
122 
123  if not skip_test:
124  # handle working/temporary directory options
125  workdir = self.workdir
126  if self.use_temp_dir:
127  if self._common_tmpdir:
128  workdir = self._common_tmpdir
129  else:
130  workdir = tempfile.mkdtemp()
131 
132  # prepare the command to execute
133  prog = ''
134  if self.program != '':
135  prog = self.program
136  elif "GAUDIEXE" in os.environ:
137  prog = os.environ["GAUDIEXE"]
138  else:
139  prog = "Gaudi.exe"
140 
141  dummy, prog_ext = os.path.splitext(prog)
142  if prog_ext not in [".exe", ".py", ".bat"]:
143  prog += ".exe"
144  prog_ext = ".exe"
145 
146  prog = which(prog) or prog
147 
148  args = map(RationalizePath, self.args)
149 
150  if prog_ext == ".py":
151  params = ['python', RationalizePath(prog)] + args
152  else:
153  params = [RationalizePath(prog)] + args
154 
155  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
156  'RESOURCE': None, 'TARGET': None,
157  'TRACEBACK': None, 'START_TIME': None,
158  'END_TIME': None, 'TIMEOUT_DETAIL': None})
159  self.result = validatorRes
160 
161  # we need to switch directory because the validator expects to run
162  # in the same dir as the program
163  os.chdir(workdir)
164 
165  # launching test in a different thread to handle timeout exception
166  def target():
167  logging.debug('executing %r in %s',
168  params, workdir)
169  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
170  env=self.environment)
171  logging.debug('(pid: %d)', self.proc.pid)
172  self.out, self.err = self.proc.communicate()
173 
174  thread = threading.Thread(target=target)
175  thread.start()
176  # catching timeout
177  thread.join(self.timeout)
178 
179  if thread.is_alive():
180  logging.debug('time out in test %s (pid %d)',
181  self.name, self.proc.pid)
182  # get the stack trace of the stuck process
183  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
184  '--eval-command=thread apply all backtrace']
185  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
186  self.stack_trace = gdb.communicate()[0]
187 
188  kill_tree(self.proc.pid, signal.SIGTERM)
189  thread.join(60)
190  if thread.is_alive():
191  kill_tree(self.proc.pid, signal.SIGKILL)
192  self.causes.append('timeout')
193  else:
194  logging.debug('completed test %s', self.name)
195 
196  # Getting the error code
197  logging.debug('returnedCode = %s', self.proc.returncode)
198  self.returnedCode = self.proc.returncode
199 
200  logging.debug('validating test...')
201  self.result, self.causes = self.ValidateOutput(stdout=self.out,
202  stderr=self.err,
203  result=validatorRes)
204 
205  # remove the temporary directory if we created it
206  if self.use_temp_dir and not self._common_tmpdir:
207  shutil.rmtree(workdir, True)
208 
209  os.chdir(self.basedir)
210 
211  # handle application exit code
212  if self.signal is not None:
213  if int(self.returnedCode) != -int(self.signal):
214  self.causes.append('exit code')
215 
216  elif self.exit_code is not None:
217  if int(self.returnedCode) != int(self.exit_code):
218  self.causes.append('exit code')
219 
220  elif self.returnedCode != 0:
221  self.causes.append("exit code")
222 
223  if self.causes:
224  self.status = "failed"
225  else:
226  self.status = "passed"
227 
228  else:
229  self.status = "skipped"
230 
231  logging.debug('%s: %s', self.name, self.status)
232  field_mapping = {'Exit Code': 'returnedCode',
233  'stderr': 'err',
234  'Arguments': 'args',
235  'Environment': 'environment',
236  'Status': 'status',
237  'stdout': 'out',
238  'Program Name': 'program',
239  'Name': 'name',
240  'Validator': 'validator',
241  'Output Reference File': 'reference',
242  'Error Reference File': 'error_reference',
243  'Causes': 'causes',
244  #'Validator Result': 'result.annotations',
245  'Unsupported Platforms': 'unsupported_platforms',
246  'Stack Trace': 'stack_trace'}
247  resultDict = [(key, getattr(self, attr))
248  for key, attr in field_mapping.iteritems()
249  if getattr(self, attr)]
250  resultDict.append(('Working Directory',
251  RationalizePath(os.path.join(os.getcwd(),
252  self.workdir))))
253  # print dict(resultDict).keys()
254  resultDict.extend(self.result.annotations.iteritems())
255  # print self.result.annotations.keys()
256  return dict(resultDict)
257 
258  #-------------------------------------------------#
259  #----------------Validating tool------------------#
260  #-------------------------------------------------#
261 
262  def ValidateOutput(self, stdout, stderr, result):
263  if not self.stderr:
264  self.validateWithReference(stdout, stderr, result, self.causes)
265  elif stderr.strip() != self.stderr.strip():
266  self.causes.append('standard error')
267  return result, self.causes
268 
269  def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None):
270  """
271  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed via the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line), otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
272  """
273 
274  if reference is None:
275  reference = self.reference
276  if stdout is None:
277  stdout = self.out
278  if result is None:
279  result = self.result
280  if causes is None:
281  causes = self.causes
282 
283  reflines = filter(
284  None, map(lambda s: s.rstrip(), reference.splitlines()))
285  if not reflines:
286  raise RuntimeError("Empty (or null) reference")
287  # the same on standard output
288  outlines = filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))
289 
290  res_field = "GaudiTest.RefBlock"
291  if id:
292  res_field += "_%s" % id
293 
294  if signature is None:
295  if signature_offset < 0:
296  signature_offset = len(reference) + signature_offset
297  signature = reflines[signature_offset]
298  # find the reference block in the output file
299  try:
300  pos = outlines.index(signature)
301  outlines = outlines[pos - signature_offset:pos +
302  len(reflines) - signature_offset]
303  if reflines != outlines:
304  msg = "standard output"
305  # I do not want 2 messages in causes if the function is called twice
306  if not msg in causes:
307  causes.append(msg)
308  result[res_field +
309  ".observed"] = result.Quote("\n".join(outlines))
310  except ValueError:
311  causes.append("missing signature")
312  result[res_field + ".signature"] = result.Quote(signature)
313  if len(reflines) > 1 or signature != reflines[0]:
314  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
315  return causes
316 
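# --- Illustrative usage sketch (not part of the original BaseTest.py) ---
# findReferenceBlock() is meant to be called from a test's validator code
# after run() has filled self.out. A hypothetical reference block; by
# default the first line acts as the signature:
#
#   block = '\n'.join(['EvtCounter   INFO #events   = 10',
#                      'EvtCounter   INFO #accepted = 8'])
#   self.findReferenceBlock(reference=block, id='counters')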
317  def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None, result=None, causes=None):
318  """
319  Count the number of messages with required severity (by default ERROR and FATAL)
320  and check if their numbers match the expected ones (0 by default).
321  The dictionary "expected" can be used to tune the number of errors and fatals
322  allowed, or to limit the number of expected warnings etc.
323  """
324 
325  if stdout is None:
326  stdout = self.out
327  if result is None:
328  result = self.result
329  if causes is None:
330  causes = self.causes
331 
332  # prepare the dictionary to record the extracted lines
333  errors = {}
334  for sev in expected:
335  errors[sev] = []
336 
337  outlines = stdout.splitlines()
338  from math import log10
339  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
340 
341  linecount = 0
342  for l in outlines:
343  linecount += 1
344  words = l.split()
345  if len(words) >= 2 and words[1] in errors:
346  errors[words[1]].append(fmt % (linecount, l.rstrip()))
347 
348  for e in errors:
349  if len(errors[e]) != expected[e]:
350  causes.append('%s(%d)' % (e, len(errors[e])))
351  result["GaudiTest.lines.%s" %
352  e] = result.Quote('\n'.join(errors[e]))
353  result["GaudiTest.lines.%s.expected#" %
354  e] = result.Quote(str(expected[e]))
355 
356  return causes
357 
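# --- Illustrative usage sketch (not part of the original BaseTest.py) ---
# countErrorLines() inspects the second word of every stdout line, i.e. the
# level field of the usual Gaudi message format '<source>  <LEVEL> <text>'.
# A hypothetical call that tolerates exactly one ERROR and forbids FATAL and
# WARNING messages:
#
#   self.countErrorLines(expected={'ERROR': 1, 'FATAL': 0, 'WARNING': 0})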
358  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
359  trees_dict=None,
360  ignore=r"Basket|.*size|Compression"):
361  """
362  Compare the TTree summaries in stdout with the ones in trees_dict or in
363  the reference file. By default ignore the size, compression and basket
364  fields.
365  The presence of TTree summaries when none is expected is not a failure.
366  """
367  if stdout is None:
368  stdout = self.out
369  if result is None:
370  result = self.result
371  if causes is None:
372  causes = self.causes
373  if trees_dict is None:
374  lreference = self._expandReferenceFileName(self.reference)
375  # call the validator if the file exists
376  if lreference and os.path.isfile(lreference):
377  trees_dict = findTTreeSummaries(open(lreference).read())
378  else:
379  trees_dict = {}
380 
381  from pprint import PrettyPrinter
382  pp = PrettyPrinter()
383  if trees_dict:
384  result["GaudiTest.TTrees.expected"] = result.Quote(
385  pp.pformat(trees_dict))
386  if ignore:
387  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
388 
389  trees = findTTreeSummaries(stdout)
390  failed = cmpTreesDicts(trees_dict, trees, ignore)
391  if failed:
392  causes.append("trees summaries")
393  msg = "%s: %s != %s" % getCmpFailingValues(
394  trees_dict, trees, failed)
395  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
396  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
397 
398  return causes
399 
400  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
401  dict=None,
402  ignore=None):
403  """
404  Compare the histogram summaries in stdout with the ones in dict or in
405  the reference file. By default nothing is ignored (the 'ignore'
406  argument is None).
407  The presence of histogram summaries when none is expected is not a failure.
408  """
409  if stdout is None:
410  stdout = self.out
411  if result is None:
412  result = self.result
413  if causes is None:
414  causes = self.causes
415 
416  if dict is None:
417  lreference = self._expandReferenceFileName(self.reference)
418  # call the validator if the file exists
419  if lreference and os.path.isfile(lreference):
420  dict = findHistosSummaries(open(lreference).read())
421  else:
422  dict = {}
423 
424  from pprint import PrettyPrinter
425  pp = PrettyPrinter()
426  if dict:
427  result["GaudiTest.Histos.expected"] = result.Quote(
428  pp.pformat(dict))
429  if ignore:
430  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
431 
432  histos = findHistosSummaries(stdout)
433  failed = cmpTreesDicts(dict, histos, ignore)
434  if failed:
435  causes.append("histos summaries")
436  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
437  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
438  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
439 
440  return causes
441 
442  def validateWithReference(self, stdout=None, stderr=None, result=None,
443  causes=None, preproc=None):
444  '''
445  Default validation action: compare standard output and error to the
446  reference files.
447  '''
448 
449  if stdout is None:
450  stdout = self.out
451  if stderr is None:
452  stderr = self.err
453  if result is None:
454  result = self.result
455  if causes is None:
456  causes = self.causes
457 
458  # set the default output preprocessor
459  if preproc is None:
460  preproc = normalizeExamples
461  # check standard output
462  lreference = self._expandReferenceFileName(self.reference)
463  # call the validator if the file exists
464  if lreference and os.path.isfile(lreference):
465  causes += ReferenceFileValidator(lreference,
466  "standard output",
467  "Output Diff",
468  preproc=preproc)(stdout, result)
469  # Compare TTree summaries
470  causes = self.CheckTTreesSummaries(stdout, result, causes)
471  causes = self.CheckHistosSummaries(stdout, result, causes)
472  if causes: # Write a new reference file for stdout
473  try:
474  newref = open(lreference + ".new", "w")
475  # sanitize newlines
476  for l in stdout.splitlines():
477  newref.write(l.rstrip() + '\n')
478  del newref # flush and close
479  except IOError:
480  # Ignore IO errors when trying to update reference files
481  # because we may be in a read-only filesystem
482  pass
483 
484  # check standard error
485  lreference = self._expandReferenceFileName(self.error_reference)
486  # call the validator if we have a file to use
487  if lreference and os.path.isfile(lreference):
488  newcauses = ReferenceFileValidator(lreference,
489  "standard error",
490  "Error Diff",
491  preproc=preproc)(stderr, result)
492  causes += newcauses
493  if newcauses: # Write a new reference file for stderr
494  newref = open(lreference + ".new", "w")
495  # sanitize newlines
496  for l in stderr.splitlines():
497  newref.write(l.rstrip() + '\n')
498  del newref # flush and close
499  else:
500  causes += BasicOutputValidator(lreference, "standard error",
501  "ExecTest.expected_stderr")(stderr, result)
502  return causes
503 
504  def _expandReferenceFileName(self, reffile):
505  # if no file is passed, do nothing
506  if not reffile:
507  return ""
508 
509  # function to split an extension into its constituent parts
510  def platformSplit(p): return set(p.split('-' in p and '-' or '_'))
511 
512  reference = os.path.normpath(os.path.join(self.basedir,
513  os.path.expandvars(reffile)))
514 
515  # old-style platform-specific reference name
516  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
517  if os.path.isfile(spec_ref):
518  reference = spec_ref
519  else: # look for new-style platform specific reference files:
520  # get all the files whose name start with the reference filename
521  dirname, basename = os.path.split(reference)
522  if not dirname:
523  dirname = '.'
524  head = basename + "."
525  head_len = len(head)
526  platform = platformSplit(GetPlatform(self))
527  if 'do0' in platform:
528  platform.add('dbg')
529  candidates = []
530  for f in os.listdir(dirname):
531  if f.startswith(head):
532  req_plat = platformSplit(f[head_len:])
533  if platform.issuperset(req_plat):
534  candidates.append((len(req_plat), f))
535  if candidates: # take the one with highest matching
536  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
537  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
538  candidates.sort()
539  reference = os.path.join(dirname, candidates[-1][1])
540  return reference
541 
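# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# A test can specialize ValidateOutput() and still reuse the reference-file
# machinery above, for instance by adding a preprocessor that hides lines of
# a hypothetical 'MyCounterSvc' before the diff. The class name and the
# skipped string are made up; normalizeExamples and LineSkipper are defined
# further down in this module and are only looked up when the method runs.
class _ExampleCustomTest(BaseTest):
    def ValidateOutput(self, stdout, stderr, result):
        preproc = normalizeExamples + LineSkipper(['MyCounterSvc'])
        self.validateWithReference(stdout, stderr, result, self.causes,
                                   preproc=preproc)
        return result, self.causes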
542 # ======= GAUDI TOOLS =======
543 
544 
545 import shutil
546 import string
547 import difflib
548 import calendar
549 
550 try:
551  from GaudiKernel import ROOT6WorkAroundEnabled
552 except ImportError:
553  def ROOT6WorkAroundEnabled(id=None):
554  # dummy implementation
555  return False
556 
557 #--------------------------------- TOOLS ---------------------------------#
558 
559 
560 def RationalizePath(p):
561  """
562  Function used to normalize the used path
563  """
564  newPath = os.path.normpath(os.path.expandvars(p))
565  if os.path.exists(newPath):
566  p = os.path.realpath(newPath)
567  return p
568 
569 
570 def which(executable):
571  """
572  Locates an executable in the executables path ($PATH) and returns the full
573  path to it. An application is looked for with or without the '.exe' suffix.
574  If the executable cannot be found, None is returned.
575  """
576  if os.path.isabs(executable):
577  if not os.path.exists(executable):
578  if executable.endswith('.exe'):
579  if os.path.exists(executable[:-4]):
580  return executable[:-4]
581  else:
582  head, executable = os.path.split(executable)
583  else:
584  return executable
585  for d in os.environ.get("PATH").split(os.pathsep):
586  fullpath = os.path.join(d, executable)
587  if os.path.exists(fullpath):
588  return fullpath
589  if executable.endswith('.exe'):
590  return which(executable[:-4])
591  return None
592 
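# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# which() resolves a program through $PATH, retrying without the '.exe'
# suffix; run() falls back to the bare name when the lookup fails. The
# helper below is hypothetical and only demonstrates that fallback.
def _example_resolve_program(name='Gaudi.exe'):
    return which(name) or name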
593 
594 
595 #-------------------------------------------------------------------------#
596 #----------------------------- Result Class ------------------------------#
597 #-------------------------------------------------------------------------#
598 import types
599 
600 
601 class Result:
602 
603  PASS = 'PASS'
604  FAIL = 'FAIL'
605  ERROR = 'ERROR'
606  UNTESTED = 'UNTESTED'
607 
608  EXCEPTION = ""
609  RESOURCE = ""
610  TARGET = ""
611  TRACEBACK = ""
612  START_TIME = ""
613  END_TIME = ""
614  TIMEOUT_DETAIL = ""
615 
616  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
617  self.annotations = annotations.copy()
618 
619  def __getitem__(self, key):
620  assert type(key) in types.StringTypes
621  return self.annotations[key]
622 
623  def __setitem__(self, key, value):
624  assert type(key) in types.StringTypes
625  assert type(value) in types.StringTypes
626  self.annotations[key] = value
627 
628  def Quote(self, string):
629  return string
630 
631 
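# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# Result is a thin container of string annotations: validators store quoted
# text blocks under string keys and run() merges result.annotations into the
# final report dictionary. Hypothetical helper, key name made up.
def _example_annotate_result():
    result = Result()
    result['GaudiTest.example'] = result.Quote('some captured output')
    return result.annotations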
632 #-------------------------------------------------------------------------#
633 #--------------------------- Validator Classes ---------------------------#
634 #-------------------------------------------------------------------------#
635 
636 # Basic implementation of an output validator for Gaudi tests. This implementation is based on the standard (LCG) validation functions used in QMTest.
637 
638 
639 class BasicOutputValidator:
640 
641  def __init__(self, ref, cause, result_key):
642  self.ref = ref
643  self.cause = cause
644  self.result_key = result_key
645 
646  def __call__(self, out, result):
647  """Validate the output of the program.
648  'stdout' -- A string containing the data written to the standard output
649  stream.
650  'stderr' -- A string containing the data written to the standard error
651  stream.
652  'result' -- A 'Result' object. It may be used to annotate
653  the outcome according to the content of stderr.
654  returns -- A list of strings giving causes of failure."""
655 
656  causes = []
657  # Check the output
658  if not self.__CompareText(out, self.ref):
659  causes.append(self.cause)
660  result[self.result_key] = result.Quote(self.ref)
661 
662  return causes
663 
664  def __CompareText(self, s1, s2):
665  """Compare 's1' and 's2', ignoring line endings.
666  's1' -- A string.
667  's2' -- A string.
668  returns -- True if 's1' and 's2' are the same, ignoring
669  differences in line endings."""
670  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
671  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
672  to_ignore = re.compile(
673  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
674 
675  def keep_line(l): return not to_ignore.match(l)
676  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
677  else:
678  return s1.splitlines() == s2.splitlines()
679 
680 
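# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# BasicOutputValidator (above) is what validateWithReference() falls back to
# for stderr when no error reference file exists. A stand-alone sketch with
# made-up reference and output text:
def _example_basic_stderr_check():
    result = Result()
    check = BasicOutputValidator('expected text\n', 'standard error',
                                 'ExecTest.expected_stderr')
    return check('actual text\n', result)    # -> ['standard error']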
681 #------------------------ Preprocessor elements ------------------------#
682 class FilePreprocessor:
683  """ Base class for a callable that takes a file and returns a modified
684  version of it."""
685 
686  def __processLine__(self, line):
687  return line
688 
689  def __processFile__(self, lines):
690  output = []
691  for l in lines:
692  l = self.__processLine__(l)
693  if l:
694  output.append(l)
695  return output
696 
697  def __call__(self, input):
698  if hasattr(input, "__iter__"):
699  lines = input
700  mergeback = False
701  else:
702  lines = input.splitlines()
703  mergeback = True
704  output = self.__processFile__(lines)
705  if mergeback:
706  output = '\n'.join(output)
707  return output
708 
709  def __add__(self, rhs):
710  return FilePreprocessorSequence([self, rhs])
711 
712 
713 class FilePreprocessorSequence(FilePreprocessor):
714  def __init__(self, members=[]):
715  self.members = members
716 
717  def __add__(self, rhs):
718  return FilePreprocessorSequence(self.members + [rhs])
719 
720  def __call__(self, input):
721  output = input
722  for pp in self.members:
723  output = pp(output)
724  return output
725 
726 
727 class LineSkipper(FilePreprocessor):
728  def __init__(self, strings=[], regexps=[]):
729  import re
730  self.strings = strings
731  self.regexps = map(re.compile, regexps)
732 
733  def __processLine__(self, line):
734  for s in self.strings:
735  if line.find(s) >= 0:
736  return None
737  for r in self.regexps:
738  if r.search(line):
739  return None
740  return line
741 
742 
743 class BlockSkipper(FilePreprocessor):
744  def __init__(self, start, end):
745  self.start = start
746  self.end = end
747  self._skipping = False
748 
749  def __processLine__(self, line):
750  if self.start in line:
751  self._skipping = True
752  return None
753  elif self.end in line:
754  self._skipping = False
755  elif self._skipping:
756  return None
757  return line
758 
759 
760 class RegexpReplacer(FilePreprocessor):
761  def __init__(self, orig, repl="", when=None):
762  if when:
763  when = re.compile(when)
764  self._operations = [(when, re.compile(orig), repl)]
765 
766  def __add__(self, rhs):
767  if isinstance(rhs, RegexpReplacer):
768  res = RegexpReplacer("", "", None)
769  res._operations = self._operations + rhs._operations
770  else:
771  res = FilePreprocessor.__add__(self, rhs)
772  return res
773 
774  def __processLine__(self, line):
775  for w, o, r in self._operations:
776  if w is None or w.search(line):
777  line = o.sub(r, line)
778  return line
779 
780 
781 # Common preprocessors
782 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
783 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
784  "00:00:00 1970-01-01")
785 normalizeEOL = FilePreprocessor()
786 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
787 
788 skipEmptyLines = FilePreprocessor()
789 # FIXME: that's ugly
790 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
791 
792 # Special preprocessor sorting the list of strings (whitespace separated)
793 # that follow a signature on a single line
794 
795 
796 class LineSorter(FilePreprocessor):
797  def __init__(self, signature):
798  self.signature = signature
799  self.siglen = len(signature)
800 
801  def __processLine__(self, line):
802  pos = line.find(self.signature)
803  if pos >= 0:
804  line = line[:(pos + self.siglen)]
805  lst = line[(pos + self.siglen):].split()
806  lst.sort()
807  line += " ".join(lst)
808  return line
809 
810 
811 class SortGroupOfLines(FilePreprocessor):
812  '''
813  Sort group of lines matching a regular expression
814  '''
815 
816  def __init__(self, exp):
817  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
818 
819  def __processFile__(self, lines):
820  match = self.exp.match
821  output = []
822  group = []
823  for l in lines:
824  if match(l):
825  group.append(l)
826  else:
827  if group:
828  group.sort()
829  output.extend(group)
830  group = []
831  output.append(l)
832  return output
833 
834 
835 # Preprocessors for GaudiExamples
836 normalizeExamples = maskPointers + normalizeDate
837 for w, o, r in [
838  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
839  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
840  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
841  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
842  ("^JobOptionsSvc.*options successfully read in from",
843  r"read in from .*[/\\]([^/\\]*)$", r"file \1"), # normalize path to options
844  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
845  (None, r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
846  "00000000-0000-0000-0000-000000000000"),
847  # Absorb a change in ServiceLocatorHelper
848  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
849  "ServiceLocatorHelper::service"),
850  # Remove the leading 0 in Windows' exponential format
851  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
852  # Output line changed in Gaudi v24
853  (None, r'Service reference count check:',
854  r'Looping over all active services...'),
855  # Ignore count of declared properties (anyway they are all printed)
856  (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+", r"\1NN"),
857 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
858  normalizeExamples += RegexpReplacer(o, r, w)
859 
860 lineSkipper = LineSkipper(["//GP:",
861  "JobOptionsSvc INFO # ",
862  "JobOptionsSvc WARNING # ",
863  "Time User",
864  "Welcome to",
865  "This machine has a speed",
866  "TIME:",
867  "running on",
868  "ToolSvc.Sequenc... INFO",
869  "DataListenerSvc INFO XML written to file:",
870  "[INFO]", "[WARNING]",
871  "DEBUG No writable file catalog found which contains FID:",
872  "DEBUG Service base class initialized successfully", # changed between v20 and v21
873  "DEBUG Incident timing:", # introduced with patch #3487
874  # changed the level of the message from INFO to DEBUG
875  "INFO 'CnvServices':[",
876  # The signal handler complains about SIGXCPU not defined on some platforms
877  'SIGXCPU',
878  ], regexps=[
879  r"^JobOptionsSvc INFO *$",
880  r"^# ", # Ignore python comments
881  # skip the message reporting the version of the root file
882  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
883  r"File '.*.xml' does not exist",
884  r"INFO Refer to dataset .* by its file ID:",
885  r"INFO Referring to dataset .* by its file ID:",
886  r"INFO Disconnect from dataset",
887  r"INFO Disconnected from dataset",
888  r"INFO Disconnected data IO:",
889  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
890  # I want to ignore the header of the unchecked StatusCode report
891  r"^StatusCodeSvc.*listing all unchecked return codes:",
892  r"^StatusCodeSvc\s*INFO\s*$",
893  r"Num\s*\|\s*Function\s*\|\s*Source Library",
894  r"^[-+]*\s*$",
895  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
896  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
897  # Hide unchecked StatusCodes from dictionaries
898  r"^ +[0-9]+ \|.*ROOT",
899  r"^ +[0-9]+ \|.*\|.*Dict",
900  # Hide success StatusCodeSvc message
901  r"StatusCodeSvc.*all StatusCode instances where checked",
902  # Hide EventLoopMgr total timing report
903  r"EventLoopMgr.*---> Loop Finished",
904  # Remove ROOT TTree summary table, which changes from one version to the other
905  r"^\*.*\*$",
906  # Remove Histos Summaries
907  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
908  r"^ \|",
909  r"^ ID=",
910  # Ignore added/removed properties
911  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
912  # these were missing in tools
913  r"Property(.*)'AuditRe(start|initialize)':",
914  r"Property(.*)'IsIOBound':",
915  # removed with gaudi/Gaudi!273
916  r"Property(.*)'ErrorCount(er)?':",
917  # added with gaudi/Gaudi!306
918  r"Property(.*)'Sequential':",
919  # added with gaudi/Gaudi!314
920  r"Property(.*)'FilterCircularDependencies':",
921  # removed with gaudi/Gaudi!316
922  r"Property(.*)'IsClonable':",
923  # ignore uninteresting/obsolete messages
924  r"Property update for OutputLevel : new value =",
925  r"EventLoopMgr\s*DEBUG Creating OutputStream",
926 ])
927 
928 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
929  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
930  lineSkipper += LineSkipper(regexps=[
931  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
932  ])
933 
934 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
935  normalizeEOL + LineSorter("Services to release : ") +
936  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
937 
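# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# The combined preprocessor chain is a callable: given a string (or a list
# of lines) it returns the normalized text that is actually diffed against
# the reference. The log line below is made up.
def _example_normalize_line():
    line = 'HistogramSvc   INFO Histograms saved at 0x7f3a12bd5e40'
    return normalizeExamples(line)           # pointer masked to 0x########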
938 #--------------------- Validation functions/classes ---------------------#
939 
940 
941 class ReferenceFileValidator:
942  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
943  self.reffile = os.path.expandvars(reffile)
944  self.cause = cause
945  self.result_key = result_key
946  self.preproc = preproc
947 
948  def __call__(self, stdout, result):
949  causes = []
950  if os.path.isfile(self.reffile):
951  orig = open(self.reffile).xreadlines()
952  if self.preproc:
953  orig = self.preproc(orig)
954  result[self.result_key + '.preproc.orig'] = \
955  result.Quote('\n'.join(map(str.strip, orig)))
956  else:
957  orig = []
958  new = stdout.splitlines()
959  if self.preproc:
960  new = self.preproc(new)
961 
962  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
963  filterdiffs = map(lambda x: x.strip(), filter(
964  lambda x: x[0] != " ", diffs))
965  if filterdiffs:
966  result[self.result_key] = result.Quote("\n".join(filterdiffs))
967  result[self.result_key] += result.Quote("""
968  Legend:
969  -) reference file
970  +) standard output of the test""")
971  result[self.result_key + '.preproc.new'] = \
972  result.Quote('\n'.join(map(str.strip, new)))
973  causes.append(self.cause)
974  return causes
975 
976 
977 def findTTreeSummaries(stdout):
978  """
979  Scan stdout to find ROOT TTree summaries and digest them.
980  """
981  stars = re.compile(r"^\*+$")
982  outlines = stdout.splitlines()
983  nlines = len(outlines)
984  trees = {}
985 
986  i = 0
987  while i < nlines: # loop over the output
988  # look for
989  while i < nlines and not stars.match(outlines[i]):
990  i += 1
991  if i < nlines:
992  tree, i = _parseTTreeSummary(outlines, i)
993  if tree:
994  trees[tree["Name"]] = tree
995 
996  return trees
997 
998 
999 def cmpTreesDicts(reference, to_check, ignore=None):
1000  """
1001  Check that all the keys in reference are in to_check too, with the same value.
1002  If the value is a dict, the function is called recursively. to_check can
1003  contain more keys than reference, that will not be tested.
1004  The function returns at the first difference found.
1005  """
1006  fail_keys = []
1007  # filter the keys in the reference dictionary
1008  if ignore:
1009  ignore_re = re.compile(ignore)
1010  keys = [key for key in reference if not ignore_re.match(key)]
1011  else:
1012  keys = reference.keys()
1013  # loop over the keys (not ignored) in the reference dictionary
1014  for k in keys:
1015  if k in to_check: # the key must be in the dictionary to_check
1016  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1017  # if both reference and to_check values are dictionaries, recurse
1018  failed = fail_keys = cmpTreesDicts(
1019  reference[k], to_check[k], ignore)
1020  else:
1021  # compare the two values
1022  failed = to_check[k] != reference[k]
1023  else: # handle missing keys in the dictionary to check (i.e. failure)
1024  to_check[k] = None
1025  failed = True
1026  if failed:
1027  fail_keys.insert(0, k)
1028  break # exit from the loop at the first failure
1029  return fail_keys # return the list of keys bringing to the different values
1030 
1031 
1032 def getCmpFailingValues(reference, to_check, fail_path):
1033  c = to_check
1034  r = reference
1035  for k in fail_path:
1036  c = c.get(k, None)
1037  r = r.get(k, None)
1038  if c is None or r is None:
1039  break # one of the dictionaries is not deep enough
1040  return (fail_path, r, c)
1041 
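# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# cmpTreesDicts() walks the reference dictionary recursively and returns the
# path of keys leading to the first difference; getCmpFailingValues() then
# extracts the two differing values for the failure message. The summaries
# below are made up.
def _example_compare_summaries():
    reference = {'MyTree': {'Entries': 100, 'Total size': 1234}}
    observed = {'MyTree': {'Entries': 99, 'Total size': 1234}}
    failed = cmpTreesDicts(reference, observed, ignore=r".*size")
    if failed:                               # e.g. ['MyTree', 'Entries']
        return getCmpFailingValues(reference, observed, failed)
    return None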
1042 
1043 # signature of the print-out of the histograms
1044 h_count_re = re.compile(
1045  r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1046 
1047 
1048 def _parseTTreeSummary(lines, pos):
1049  """
1050  Parse the TTree summary table in lines, starting from pos.
1051  Returns a tuple with the dictionary of the digested information and the
1052  position of the first line after the summary.
1053  """
1054  result = {}
1055  i = pos + 1 # first line is a sequence of '*'
1056  count = len(lines)
1057 
1058  def splitcols(l): return [f.strip() for f in l.strip("*\n").split(':', 2)]
1059 
1060  def parseblock(ll):
1061  r = {}
1062  cols = splitcols(ll[0])
1063  r["Name"], r["Title"] = cols[1:]
1064 
1065  cols = splitcols(ll[1])
1066  r["Entries"] = int(cols[1])
1067 
1068  sizes = cols[2].split()
1069  r["Total size"] = int(sizes[2])
1070  if sizes[-1] == "memory":
1071  r["File size"] = 0
1072  else:
1073  r["File size"] = int(sizes[-1])
1074 
1075  cols = splitcols(ll[2])
1076  sizes = cols[2].split()
1077  if cols[0] == "Baskets":
1078  r["Baskets"] = int(cols[1])
1079  r["Basket size"] = int(sizes[2])
1080  r["Compression"] = float(sizes[-1])
1081  return r
1082 
1083  if i < (count - 3) and lines[i].startswith("*Tree"):
1084  result = parseblock(lines[i:i + 3])
1085  result["Branches"] = {}
1086  i += 4
1087  while i < (count - 3) and lines[i].startswith("*Br"):
1088  if i < (count - 2) and lines[i].startswith("*Branch "):
1089  # skip branch header
1090  i += 3
1091  continue
1092  branch = parseblock(lines[i:i + 3])
1093  result["Branches"][branch["Name"]] = branch
1094  i += 4
1095 
1096  return (result, i)
1097 
1098 
1099 def parseHistosSummary(lines, pos):
1100  """
1101  Extract the histogram info from the lines starting at pos.
1102  Returns the position of the first line after the summary block.
1103  """
1104  global h_count_re
1105  h_table_head = re.compile(
1106  r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1107  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1108 
1109  nlines = len(lines)
1110 
1111  # decode header
1112  m = h_count_re.search(lines[pos])
1113  name = m.group(1).strip()
1114  total = int(m.group(2))
1115  header = {}
1116  for k, v in [x.split("=") for x in m.group(3).split()]:
1117  header[k] = int(v)
1118  pos += 1
1119  header["Total"] = total
1120 
1121  summ = {}
1122  while pos < nlines:
1123  m = h_table_head.search(lines[pos])
1124  if m:
1125  t, d = m.groups(1) # type and directory
1126  t = t.replace(" profile", "Prof")
1127  pos += 1
1128  if pos < nlines:
1129  l = lines[pos]
1130  else:
1131  l = ""
1132  cont = {}
1133  if l.startswith(" | ID"):
1134  # table format
1135  titles = [x.strip() for x in l.split("|")][1:]
1136  pos += 1
1137  while pos < nlines and lines[pos].startswith(" |"):
1138  l = lines[pos]
1139  values = [x.strip() for x in l.split("|")][1:]
1140  hcont = {}
1141  for i in range(len(titles)):
1142  hcont[titles[i]] = values[i]
1143  cont[hcont["ID"]] = hcont
1144  pos += 1
1145  elif l.startswith(" ID="):
1146  while pos < nlines and lines[pos].startswith(" ID="):
1147  values = [x.strip()
1148  for x in h_short_summ.search(lines[pos]).groups()]
1149  cont[values[0]] = values
1150  pos += 1
1151  else: # not interpreted
1152  raise RuntimeError(
1153  "Cannot understand line %d: '%s'" % (pos, l))
1154  if not d in summ:
1155  summ[d] = {}
1156  summ[d][t] = cont
1157  summ[d]["header"] = header
1158  else:
1159  break
1160  if not summ:
1161  # If the full table is not present, we use only the header
1162  summ[name] = {"header": header}
1163  return summ, pos
1164 
1165 
1166 def findHistosSummaries(stdout):
1167  """
1168  Scan stdout to find histogram summaries and digest them.
1169  """
1170  outlines = stdout.splitlines()
1171  nlines = len(outlines) - 1
1172  summaries = {}
1173  global h_count_re
1174 
1175  pos = 0
1176  while pos < nlines:
1177  summ = {}
1178  # find first line of block:
1179  match = h_count_re.search(outlines[pos])
1180  while pos < nlines and not match:
1181  pos += 1
1182  match = h_count_re.search(outlines[pos])
1183  if match:
1184  summ, pos = parseHistosSummary(outlines, pos)
1185  summaries.update(summ)
1186  return summaries
1187 
1188 
1189 def PlatformIsNotSupported(self, context, result):
1190  platform = GetPlatform(self)
1191  unsupported = [re.compile(x) for x in [str(y).strip()
1192  for y in unsupported_platforms] if x]
1193  for p_re in unsupported:
1194  if p_re.search(platform):
1195  result.SetOutcome(result.UNTESTED)
1196  result[result.CAUSE] = 'Platform not supported.'
1197  return True
1198  return False
1199 
1200 
1201 def GetPlatform(self):
1202  """
1203  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1204  """
1205  arch = "None"
1206  # check architecture name
1207  if "BINARY_TAG" in os.environ:
1208  arch = os.environ["BINARY_TAG"]
1209  elif "CMTCONFIG" in os.environ:
1210  arch = os.environ["CMTCONFIG"]
1211  elif "SCRAM_ARCH" in os.environ:
1212  arch = os.environ["SCRAM_ARCH"]
1213  return arch
1214 
1215 
1216 def isWinPlatform(self):
1217  """
1218  Return True if the current platform is Windows.
1219 
1220  This function was needed because of the change in the CMTCONFIG format,
1221  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1222  """
1223  platform = GetPlatform(self)
1224  return "winxp" in platform or platform.startswith("win")
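# --- Illustrative sketch (not part of the original BaseTest.py) ----------
# Typical driving code for this module (the QMTest/CTest wrappers do
# something similar): configure the attributes set in BaseTest.__init__,
# call run() and inspect the returned report dictionary. The test name, the
# executable and the option string are all made up.
def _example_run_simple_test():
    test = BaseTest()
    test.name = 'example.simple'
    test.program = 'GaudiExample.exe'            # hypothetical executable
    test.options = 'ApplicationMgr.EvtMax = 1;'  # written to a temp .opts file
    test.exit_code = 0
    report = test.run()                          # dict of report fields
    return report['Status'], report.get('Causes', [])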