Loading web-font TeX/Math/Italic
The Gaudi Framework  v29r5 (37229091)
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
BaseTest.py
Go to the documentation of this file.
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 
def sanitize_for_xml(data):
    '''
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    '''
    # characters forbidden in XML 1.0 text: most C0 controls, lone
    # surrogates and the two non-characters U+FFFE/U+FFFF
    bad_chars = re.compile(
        u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')

    def quote(match):
        'helper function'
        # %02X (was %2X): zero-pad so codes below 0x10 render as e.g.
        # [NON-XML-CHAR-0x01] instead of [NON-XML-CHAR-0x 1]
        return ''.join('[NON-XML-CHAR-0x%02X]' % ord(c) for c in match.group())
    return bad_chars.sub(quote, data)
32 
33 
def dumpProcs(name):
    '''helper to debug GAUDI-1084, dump the list of processes'''
    from getpass import getuser
    # only useful on the build/test nodes, where WORKSPACE is defined
    if 'WORKSPACE' not in os.environ:
        return
    ps_proc = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
    listing = ps_proc.communicate()[0]
    dump_path = os.path.join(os.environ['WORKSPACE'], name)
    with open(dump_path, 'w') as dump_file:
        dump_file.write(listing)
41 
42 
def kill_tree(ppid, sig):
    '''
    Send a signal to a process and all its child processes (starting from the
    leaves).

    @param ppid: pid of the root of the process tree
    @param sig: signal number to deliver (e.g. signal.SIGTERM)
    '''
    log = logging.getLogger('kill_tree')
    ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
    children = map(int, get_children.communicate()[0].split())
    # depth-first: kill the leaves before their parent
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug('killing process %d', ppid)
        os.kill(ppid, sig)
    except OSError as err:  # 'as' syntax (was 'except OSError, err'): valid on Python 2.6+ and 3
        if err.errno != 3:  # 3 == errno.ESRCH: no such process
            raise
        # the process may legitimately have exited already
        log.debug('no such process %d', ppid)
61 
62 #-------------------------------------------------------------------------#
63 
64 
class BaseTest(object):
    '''
    Description and outcome of a single Gaudi test: user-configurable fields
    (program, arguments, reference files, ...) plus the state filled in by
    run().
    '''

    # class-level shared temporary directory; when set, it is used instead of
    # a fresh tempfile.mkdtemp() for tests that ask for a temp dir
    _common_tmpdir = None

    def __init__(self):
        # --- user-configurable fields ---
        self.program = ''
        self.args = []
        self.reference = ''
        self.error_reference = ''
        self.options = ''
        self.stderr = ''
        self.timeout = 600  # seconds before the test is killed
        self.exit_code = None
        self.environment = None
        # regexps of platform ids on which the test must be skipped
        # (restored: this assignment was missing from this revision, but
        # run() reads self.unsupported_platforms unconditionally)
        self.unsupported_platforms = []
        self.signal = None
        self.workdir = os.curdir
        self.use_temp_dir = False
        # --- variables not for users ---
        self.status = None
        self.name = ''
        self.causes = []
        self.result = Result(self)
        self.returnedCode = 0
        self.out = ''
        self.err = ''
        self.proc = None
        self.stack_trace = None
        self.basedir = os.getcwd()
94 
    def run(self):
        '''
        Execute the test: write the options to a temporary file, launch the
        program in a worker thread with a timeout, validate its output and
        return a dictionary of result fields (suitable for reporting).
        '''
        logging.debug('running test %s', self.name)

        if self.options:
            # guess the options format: new-style Python configurables get a
            # .py suffix, everything else is treated as old .opts syntax
            if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
                         'from\s+Configurables\s+import', self.options):
                optionFile = tempfile.NamedTemporaryFile(suffix='.py')
            else:
                optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
            optionFile.file.write(self.options)
            optionFile.seek(0)
            # the temporary options file is passed as an extra argument
            self.args.append(RationalizePath(optionFile.name))

        # If not specified, setting the environment
        if self.environment is None:
            self.environment = os.environ
        else:
            # user entries take precedence over os.environ duplicates here
            # NOTE: list concatenation of .items() is Python 2 semantics
            self.environment = dict(
                self.environment.items() + os.environ.items())

        platform_id = (os.environ.get('BINARY_TAG') or
                       os.environ.get('CMTCONFIG') or
                       platform.platform())
        # If at least one regex matches we skip the test.
        skip_test = bool([None
                          for prex in self.unsupported_platforms
                          if re.search(prex, platform_id)])

        if not skip_test:
            # handle working/temporary directory options
            workdir = self.workdir
            if self.use_temp_dir:
                if self._common_tmpdir:
                    workdir = self._common_tmpdir
                else:
                    workdir = tempfile.mkdtemp()

            # prepare the command to execute
            prog = ''
            if self.program != '':
                prog = self.program
            elif "GAUDIEXE" in os.environ:
                prog = os.environ["GAUDIEXE"]
            else:
                prog = "Gaudi.exe"

            dummy, prog_ext = os.path.splitext(prog)
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = map(RationalizePath, self.args)

            if prog_ext == ".py":
                # Python scripts are run through the interpreter
                params = ['python', RationalizePath(prog)] + args
            else:
                params = [RationalizePath(prog)] + args

            # fresh annotation holder handed to the validators
            validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
                                   'RESOURCE': None, 'TARGET': None,
                                   'TRACEBACK': None, 'START_TIME': None,
                                   'END_TIME': None, 'TIMEOUT_DETAIL': None})
            self.result = validatorRes

            # we need to switch directory because the validator expects to run
            # in the same dir as the program
            os.chdir(workdir)

            # launching test in a different thread to handle timeout exception
            def target():
                logging.debug('executing %r in %s',
                              params, workdir)
                self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
                                  env=self.environment)
                logging.debug('(pid: %d)', self.proc.pid)
                self.out, self.err = self.proc.communicate()

            thread = threading.Thread(target=target)
            thread.start()
            # catching timeout
            thread.join(self.timeout)

            if thread.is_alive():
                logging.debug('time out in test %s (pid %d)',
                              self.name, self.proc.pid)
                # get the stack trace of the stuck process
                cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
                       '--eval-command=thread apply all backtrace']
                gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                self.stack_trace = gdb.communicate()[0]

                # try a graceful stop first, escalate to SIGKILL after 60s
                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)
                self.causes.append('timeout')
            else:
                logging.debug('completed test %s', self.name)

                # Getting the error code
                logging.debug('returnedCode = %s', self.proc.returncode)
                self.returnedCode = self.proc.returncode

                logging.debug('validating test...')
                self.result, self.causes = self.ValidateOutput(stdout=self.out,
                                                               stderr=self.err,
                                                               result=validatorRes)

            # remove the temporary directory if we created it
            if self.use_temp_dir and not self._common_tmpdir:
                shutil.rmtree(workdir, True)

            os.chdir(self.basedir)

            # handle application exit code: an expected signal takes
            # precedence over an expected exit code, which takes precedence
            # over the default "must exit with 0"
            if self.signal is not None:
                if int(self.returnedCode) != -int(self.signal):
                    self.causes.append('exit code')

            elif self.exit_code is not None:
                if int(self.returnedCode) != int(self.exit_code):
                    self.causes.append('exit code')

            elif self.returnedCode != 0:
                self.causes.append("exit code")

            if self.causes:
                self.status = "failed"
            else:
                self.status = "passed"

        else:
            self.status = "skipped"

        logging.debug('%s: %s', self.name, self.status)
        # map report field names to the attributes holding their values
        # NOTE(review): 'validator' is not set in this class's __init__ —
        # presumably provided by a subclass; getattr would raise otherwise
        field_mapping = {'Exit Code': 'returnedCode',
                         'stderr': 'err',
                         'Arguments': 'args',
                         'Environment': 'environment',
                         'Status': 'status',
                         'stdout': 'out',
                         'Program Name': 'program',
                         'Name': 'name',
                         'Validator': 'validator',
                         'Output Reference File': 'reference',
                         'Error Reference File': 'error_reference',
                         'Causes': 'causes',
                         # 'Validator Result': 'result.annotations',
                         'Unsupported Platforms': 'unsupported_platforms',
                         'Stack Trace': 'stack_trace'}
        # only report fields with a non-empty value
        resultDict = [(key, getattr(self, attr))
                      for key, attr in field_mapping.iteritems()
                      if getattr(self, attr)]
        resultDict.append(('Working Directory',
                           RationalizePath(os.path.join(os.getcwd(),
                                                        self.workdir))))
        # print dict(resultDict).keys()
        resultDict.extend(self.result.annotations.iteritems())
        # print self.result.annotations.keys()
        return dict(resultDict)
257 
258  #-------------------------------------------------#
259  #----------------Validating tool------------------#
260  #-------------------------------------------------#
261 
262  def ValidateOutput(self, stdout, stderr, result):
263  if not self.stderr:
264  self.validateWithReference(stdout, stderr, result, self.causes)
265  elif stderr.strip() != self.stderr.strip():
266  self.causes.append('standard error')
267  return result, self.causes
268 
269  def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None):
270  """
271  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
272  """
273 
274  if reference is None:
275  reference = self.reference
276  if stdout is None:
277  stdout = self.out
278  if result is None:
279  result = self.result
280  if causes is None:
281  causes = self.causes
282 
283  reflines = filter(
284  None, map(lambda s: s.rstrip(), reference.splitlines()))
285  if not reflines:
286  raise RuntimeError("Empty (or null) reference")
287  # the same on standard output
288  outlines = filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))
289 
290  res_field = "GaudiTest.RefBlock"
291  if id:
292  res_field += "_%s" % id
293 
294  if signature is None:
295  if signature_offset < 0:
296  signature_offset = len(reference) + signature_offset
297  signature = reflines[signature_offset]
298  # find the reference block in the output file
299  try:
300  pos = outlines.index(signature)
301  outlines = outlines[pos - signature_offset:pos +
302  len(reflines) - signature_offset]
303  if reflines != outlines:
304  msg = "standard output"
305  # I do not want 2 messages in causes if teh function is called twice
306  if not msg in causes:
307  causes.append(msg)
308  result[res_field +
309  ".observed"] = result.Quote("\n".join(outlines))
310  except ValueError:
311  causes.append("missing signature")
312  result[res_field + ".signature"] = result.Quote(signature)
313  if len(reflines) > 1 or signature != reflines[0]:
314  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
315  return causes
316 
317  def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None, result=None, causes=None):
318  """
319  Count the number of messages with required severity (by default ERROR and FATAL)
320  and check if their numbers match the expected ones (0 by default).
321  The dictionary "expected" can be used to tune the number of errors and fatals
322  allowed, or to limit the number of expected warnings etc.
323  """
324 
325  if stdout is None:
326  stdout = self.out
327  if result is None:
328  result = self.result
329  if causes is None:
330  causes = self.causes
331 
332  # prepare the dictionary to record the extracted lines
333  errors = {}
334  for sev in expected:
335  errors[sev] = []
336 
337  outlines = stdout.splitlines()
338  from math import log10
339  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
340 
341  linecount = 0
342  for l in outlines:
343  linecount += 1
344  words = l.split()
345  if len(words) >= 2 and words[1] in errors:
346  errors[words[1]].append(fmt % (linecount, l.rstrip()))
347 
348  for e in errors:
349  if len(errors[e]) != expected[e]:
350  causes.append('%s(%d)' % (e, len(errors[e])))
351  result["GaudiTest.lines.%s" %
352  e] = result.Quote('\n'.join(errors[e]))
353  result["GaudiTest.lines.%s.expected#" %
354  e] = result.Quote(str(expected[e]))
355 
356  return causes
357 
358  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
359  trees_dict=None,
360  ignore=r"Basket|.*size|Compression"):
361  """
362  Compare the TTree summaries in stdout with the ones in trees_dict or in
363  the reference file. By default ignore the size, compression and basket
364  fields.
365  The presence of TTree summaries when none is expected is not a failure.
366  """
367  if stdout is None:
368  stdout = self.out
369  if result is None:
370  result = self.result
371  if causes is None:
372  causes = self.causes
373  if trees_dict is None:
374  lreference = self._expandReferenceFileName(self.reference)
375  # call the validator if the file exists
376  if lreference and os.path.isfile(lreference):
377  trees_dict = findTTreeSummaries(open(lreference).read())
378  else:
379  trees_dict = {}
380 
381  from pprint import PrettyPrinter
382  pp = PrettyPrinter()
383  if trees_dict:
384  result["GaudiTest.TTrees.expected"] = result.Quote(
385  pp.pformat(trees_dict))
386  if ignore:
387  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
388 
389  trees = findTTreeSummaries(stdout)
390  failed = cmpTreesDicts(trees_dict, trees, ignore)
391  if failed:
392  causes.append("trees summaries")
393  msg = "%s: %s != %s" % getCmpFailingValues(
394  trees_dict, trees, failed)
395  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
396  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
397 
398  return causes
399 
400  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
401  dict=None,
402  ignore=None):
403  """
404  Compare the TTree summaries in stdout with the ones in trees_dict or in
405  the reference file. By default ignore the size, compression and basket
406  fields.
407  The presence of TTree summaries when none is expected is not a failure.
408  """
409  if stdout is None:
410  stdout = self.out
411  if result is None:
412  result = self.result
413  if causes is None:
414  causes = self.causes
415 
416  if dict is None:
417  lreference = self._expandReferenceFileName(self.reference)
418  # call the validator if the file exists
419  if lreference and os.path.isfile(lreference):
420  dict = findHistosSummaries(open(lreference).read())
421  else:
422  dict = {}
423 
424  from pprint import PrettyPrinter
425  pp = PrettyPrinter()
426  if dict:
427  result["GaudiTest.Histos.expected"] = result.Quote(
428  pp.pformat(dict))
429  if ignore:
430  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
431 
432  histos = findHistosSummaries(stdout)
433  failed = cmpTreesDicts(dict, histos, ignore)
434  if failed:
435  causes.append("histos summaries")
436  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
437  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
438  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
439 
440  return causes
441 
442  def validateWithReference(self, stdout=None, stderr=None, result=None,
443  causes=None, preproc=None):
444  '''
445  Default validation acti*on: compare standard output and error to the
446  reference files.
447  '''
448 
449  if stdout is None:
450  stdout = self.out
451  if stderr is None:
452  stderr = self.err
453  if result is None:
454  result = self.result
455  if causes is None:
456  causes = self.causes
457 
458  # set the default output preprocessor
459  if preproc is None:
460  preproc = normalizeExamples
461  # check standard output
462  lreference = self._expandReferenceFileName(self.reference)
463  # call the validator if the file exists
464  if lreference and os.path.isfile(lreference):
465  causes += ReferenceFileValidator(lreference,
466  "standard output",
467  "Output Diff",
468  preproc=preproc)(stdout, result)
469  # Compare TTree summaries
470  causes = self.CheckTTreesSummaries(stdout, result, causes)
471  causes = self.CheckHistosSummaries(stdout, result, causes)
472  if causes: # Write a new reference file for stdout
473  try:
474  newref = open(lreference + ".new", "w")
475  # sanitize newlines
476  for l in stdout.splitlines():
477  newref.write(l.rstrip() + '\n')
478  del newref # flush and close
479  except IOError:
480  # Ignore IO errors when trying to update reference files
481  # because we may be in a read-only filesystem
482  pass
483 
484  # check standard error
485  lreference = self._expandReferenceFileName(self.error_reference)
486  # call the validator if we have a file to use
487  if lreference and os.path.isfile(lreference):
488  newcauses = ReferenceFileValidator(lreference,
489  "standard error",
490  "Error Diff",
491  preproc=preproc)(stderr, result)
492  causes += newcauses
493  if newcauses: # Write a new reference file for stdedd
494  newref = open(lreference + ".new", "w")
495  # sanitize newlines
496  for l in stderr.splitlines():
497  newref.write(l.rstrip() + '\n')
498  del newref # flush and close
499  else:
500  causes += BasicOutputValidator(lreference, "standard error",
501  "ExecTest.expected_stderr")(stderr, result)
502  return causes
503 
504  def _expandReferenceFileName(self, reffile):
505  # if no file is passed, do nothing
506  if not reffile:
507  return ""
508 
509  # function to split an extension in constituents parts
510  def platformSplit(p): return set(p.split('-' in p and '-' or '_'))
511 
512  reference = os.path.normpath(os.path.join(self.basedir,
513  os.path.expandvars(reffile)))
514 
515  # old-style platform-specific reference name
516  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
517  if os.path.isfile(spec_ref):
518  reference = spec_ref
519  else: # look for new-style platform specific reference files:
520  # get all the files whose name start with the reference filename
521  dirname, basename = os.path.split(reference)
522  if not dirname:
523  dirname = '.'
524  head = basename + "."
525  head_len = len(head)
526  platform = platformSplit(GetPlatform(self))
527  if 'do0' in platform:
528  platform.add('dbg')
529  candidates = []
530  for f in os.listdir(dirname):
531  if f.startswith(head):
532  req_plat = platformSplit(f[head_len:])
533  if platform.issuperset(req_plat):
534  candidates.append((len(req_plat), f))
535  if candidates: # take the one with highest matching
536  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
537  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
538  candidates.sort()
539  reference = os.path.join(dirname, candidates[-1][1])
540  return reference
541 
542 # ======= GAUDI TOOLS =======
543 
544 
545 import shutil
546 import string
547 import difflib
548 import calendar
549 
550 try:
551  from GaudiKernel import ROOT6WorkAroundEnabled
552 except ImportError:
554  # dummy implementation
555  return False
556 
557 #--------------------------------- TOOLS ---------------------------------#
558 
559 
561  """
562  Function used to normalize the used path
563  """
564  newPath = os.path.normpath(os.path.expandvars(p))
565  if os.path.exists(newPath):
566  p = os.path.realpath(newPath)
567  return p
568 
569 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe' suffix.
    If the executable cannot be found, None is returned
    """
    if os.path.isabs(executable):
        if not os.path.exists(executable):
            if executable.endswith('.exe'):
                if os.path.exists(executable[:-4]):
                    return executable[:-4]
            else:
                # fall back to searching the bare name in $PATH
                head, executable = os.path.split(executable)
        else:
            return executable
    # default to "" so an unset PATH no longer raises AttributeError
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.exists(fullpath):
            return fullpath
    if executable.endswith('.exe'):
        # retry without the '.exe' suffix
        return which(executable[:-4])
    return None
592 
593 
594 
595 #-------------------------------------------------------------------------#
596 #----------------------------- Result Classe -----------------------------#
597 #-------------------------------------------------------------------------#
598 import types
599 
600 
class Result:
    '''
    Minimal annotation container used to report validation details.
    Behaves like a string-to-string mapping via __getitem__/__setitem__.
    '''

    # possible outcomes
    PASS = 'PASS'
    FAIL = 'FAIL'
    ERROR = 'ERROR'
    UNTESTED = 'UNTESTED'

    # standard annotation keys
    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        # keep a private copy so later writes never touch the caller's
        # dictionary (or the shared default); kind/id/outcome are accepted
        # for compatibility but not stored
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert type(key) in types.StringTypes
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert type(key) in types.StringTypes
        assert type(value) in types.StringTypes
        self.annotations[key] = value

    def Quote(self, string):
        # no quoting needed in this simple reporting back-end
        return string
630 
631 
632 #-------------------------------------------------------------------------#
633 #--------------------------- Validator Classes ---------------------------#
634 #-------------------------------------------------------------------------#
635 
636 # Basic implementation of an option validator for Gaudi test. This implementation is based on the standard (LCG) validation functions used in QMTest.
637 
638 
640 
641  def __init__(self, ref, cause, result_key):
642  self.ref = ref
643  self.cause = cause
644  self.result_key = result_key
645 
646  def __call__(self, out, result):
647  """Validate the output of the program.
648  'stdout' -- A string containing the data written to the standard output
649  stream.
650  'stderr' -- A string containing the data written to the standard error
651  stream.
652  'result' -- A 'Result' object. It may be used to annotate
653  the outcome according to the content of stderr.
654  returns -- A list of strings giving causes of failure."""
655 
656  causes = []
657  # Check the output
658  if not self.__CompareText(out, self.ref):
659  causes.append(self.cause)
660  result[self.result_key] = result.Quote(self.ref)
661 
662  return causes
663 
664  def __CompareText(self, s1, s2):
665  """Compare 's1' and 's2', ignoring line endings.
666  's1' -- A string.
667  's2' -- A string.
668  returns -- True if 's1' and 's2' are the same, ignoring
669  differences in line endings."""
670  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
671  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
672  to_ignore = re.compile(
673  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
674 
675  def keep_line(l): return not to_ignore.match(l)
676  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
677  else:
678  return s1.splitlines() == s2.splitlines()
679 
680 
681 #------------------------ Preprocessor elements ------------------------#
683  """ Base class for a callable that takes a file and returns a modified
684  version of it."""
685 
686  def __processLine__(self, line):
687  return line
688 
689  def __processFile__(self, lines):
690  output = []
691  for l in lines:
692  l = self.__processLine__(l)
693  if l:
694  output.append(l)
695  return output
696 
697  def __call__(self, input):
698  if hasattr(input, "__iter__"):
699  lines = input
700  mergeback = False
701  else:
702  lines = input.splitlines()
703  mergeback = True
704  output = self.__processFile__(lines)
705  if mergeback:
706  output = '\n'.join(output)
707  return output
708 
709  def __add__(self, rhs):
710  return FilePreprocessorSequence([self, rhs])
711 
712 
714  def __init__(self, members=[]):
715  self.members = members
716 
717  def __add__(self, rhs):
718  return FilePreprocessorSequence(self.members + [rhs])
719 
720  def __call__(self, input):
721  output = input
722  for pp in self.members:
723  output = pp(output)
724  return output
725 
726 
728  def __init__(self, strings=[], regexps=[]):
729  import re
730  self.strings = strings
731  self.regexps = map(re.compile, regexps)
732 
733  def __processLine__(self, line):
734  for s in self.strings:
735  if line.find(s) >= 0:
736  return None
737  for r in self.regexps:
738  if r.search(line):
739  return None
740  return line
741 
742 
744  def __init__(self, start, end):
745  self.start = start
746  self.end = end
747  self._skipping = False
748 
749  def __processLine__(self, line):
750  if self.start in line:
751  self._skipping = True
752  return None
753  elif self.end in line:
754  self._skipping = False
755  elif self._skipping:
756  return None
757  return line
758 
759 
761  def __init__(self, orig, repl="", when=None):
762  if when:
763  when = re.compile(when)
764  self._operations = [(when, re.compile(orig), repl)]
765 
766  def __add__(self, rhs):
767  if isinstance(rhs, RegexpReplacer):
768  res = RegexpReplacer("", "", None)
769  res._operations = self._operations + rhs._operations
770  else:
771  res = FilePreprocessor.__add__(self, rhs)
772  return res
773 
774  def __processLine__(self, line):
775  for w, o, r in self._operations:
776  if w is None or w.search(line):
777  line = o.sub(r, line)
778  return line
779 
780 
781 # Common preprocessors
782 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
783 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
784  "00:00:00 1970-01-01")
785 normalizeEOL = FilePreprocessor()
786 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
787 
788 skipEmptyLines = FilePreprocessor()
789 # FIXME: that's ugly
790 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
791 
792 # Special preprocessor sorting the list of strings (whitespace separated)
793 # that follow a signature on a single line
794 
795 
797  def __init__(self, signature):
798  self.signature = signature
799  self.siglen = len(signature)
800 
801  def __processLine__(self, line):
802  pos = line.find(self.signature)
803  if pos >= 0:
804  line = line[:(pos + self.siglen)]
805  lst = line[(pos + self.siglen):].split()
806  lst.sort()
807  line += " ".join(lst)
808  return line
809 
810 
812  '''
813  Sort group of lines matching a regular expression
814  '''
815 
816  def __init__(self, exp):
817  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
818 
819  def __processFile__(self, lines):
820  match = self.exp.match
821  output = []
822  group = []
823  for l in lines:
824  if match(l):
825  group.append(l)
826  else:
827  if group:
828  group.sort()
829  output.extend(group)
830  group = []
831  output.append(l)
832  return output
833 
834 
# Preprocessors for GaudiExamples
normalizeExamples = maskPointers + normalizeDate
# (when, orig, repl) triplets: 'when' restricts the substitution to lines
# matching it (None means every line)
for w, o, r in [
    # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
    ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    ("^JobOptionsSvc.*options successfully read in from",
     r"read in from .*[/\\]([^/\\]*)$", r"file \1"),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (None, r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
     "00000000-0000-0000-0000-000000000000"),
    # Absorb a change in ServiceLocatorHelper
    ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
     "ServiceLocatorHelper::service"),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r'Service reference count check:',
     r'Looping over all active services...'),
    # Ignore count of declared properties (anyway they are all printed)
    (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+", r"\1NN"),
]:  # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
    normalizeExamples += RegexpReplacer(o, r, w)
859 
# lines (substring or regexp matches) dropped from both the reference and the
# observed output before diffing, because they are noisy or version-dependent
lineSkipper = LineSkipper(["//GP:",
                           "JobOptionsSvc INFO # ",
                           "JobOptionsSvc WARNING # ",
                           "Time User",
                           "Welcome to",
                           "This machine has a speed",
                           "TIME:",
                           "running on",
                           "ToolSvc.Sequenc... INFO",
                           "DataListenerSvc INFO XML written to file:",
                           "[INFO]", "[WARNING]",
                           "DEBUG No writable file catalog found which contains FID:",
                           "DEBUG Service base class initialized successfully",  # changed between v20 and v21
                           "DEBUG Incident timing:",  # introduced with patch #3487
                           # changed the level of the message from INFO to DEBUG
                           "INFO 'CnvServices':[",
                           # message removed because could be printed in constructor
                           "DEBUG 'CnvServices':[",
                           # The signal handler complains about SIGXCPU not defined on some platforms
                           'SIGXCPU',
                           ], regexps=[
    r"^JobOptionsSvc INFO *$",
    r"^# ",  # Ignore python comments
    # skip the message reporting the version of the root file
    r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
    r"File '.*.xml' does not exist",
    r"INFO Refer to dataset .* by its file ID:",
    r"INFO Referring to dataset .* by its file ID:",
    r"INFO Disconnect from dataset",
    r"INFO Disconnected from dataset",
    r"INFO Disconnected data IO:",
    r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
    # I want to ignore the header of the unchecked StatusCode report
    r"^StatusCodeSvc.*listing all unchecked return codes:",
    r"^StatusCodeSvc\s*INFO\s*$",
    r"Num\s*\|\s*Function\s*\|\s*Source Library",
    r"^[-+]*\s*$",
    # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
    r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
    # Hide unchecked StatusCodes from dictionaries
    r"^ +[0-9]+ \|.*ROOT",
    r"^ +[0-9]+ \|.*\|.*Dict",
    # Hide success StatusCodeSvc message
    r"StatusCodeSvc.*all StatusCode instances where checked",
    # Hide EventLoopMgr total timing report
    r"EventLoopMgr.*---> Loop Finished",
    # Remove ROOT TTree summary table, which changes from one version to the other
    r"^\*.*\*$",
    # Remove Histos Summaries
    r"SUCCESS\s*Booked \d+ Histograms",
    r"^ \|",
    r"^ ID=",
    # Ignore added/removed properties
    r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
    # these were missing in tools
    r"Property(.*)'AuditRe(start|initialize)':",
    r"Property(.*)'IsIOBound':",
    # removed with gaudi/Gaudi!273
    r"Property(.*)'ErrorCount(er)?':",
    # added with gaudi/Gaudi!306
    r"Property(.*)'Sequential':",
    # added with gaudi/Gaudi!314
    r"Property(.*)'FilterCircularDependencies':",
    # removed with gaudi/Gaudi!316
    r"Property(.*)'IsClonable':",
    # ignore uninteresting/obsolete messages
    r"Property update for OutputLevel : new value =",
    r"EventLoopMgr\s*DEBUG Creating OutputStream",
])

if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
    lineSkipper += LineSkipper(regexps=[
        r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
    ])

# the complete preprocessing chain applied to GaudiExamples output before
# comparing it with the reference
normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
                     normalizeEOL + LineSorter("Services to release : ") +
                     SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
939 
940 #--------------------- Validation functions/classes ---------------------#
941 
942 
944  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
945  self.reffile = os.path.expandvars(reffile)
946  self.cause = cause
947  self.result_key = result_key
948  self.preproc = preproc
949 
950  def __call__(self, stdout, result):
951  causes = []
952  if os.path.isfile(self.reffile):
953  orig = open(self.reffile).xreadlines()
954  if self.preproc:
955  orig = self.preproc(orig)
956  result[self.result_key + '.preproc.orig'] = \
957  result.Quote('\n'.join(map(str.strip, orig)))
958  else:
959  orig = []
960  new = stdout.splitlines()
961  if self.preproc:
962  new = self.preproc(new)
963 
964  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
965  filterdiffs = map(lambda x: x.strip(), filter(
966  lambda x: x[0] != " ", diffs))
967  if filterdiffs:
968  result[self.result_key] = result.Quote("\n".join(filterdiffs))
969  result[self.result_key] += result.Quote("""
970  Legend:
971  -) reference file
972  +) standard output of the test""")
973  result[self.result_key + '.preproc.new'] = \
974  result.Quote('\n'.join(map(str.strip, new)))
975  causes.append(self.cause)
976  return causes
977 
978 
def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.

    Returns a dict mapping each tree name to its digested summary
    (as produced by _parseTTreeSummary).
    """
    separator = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    total = len(outlines)
    trees = {}

    idx = 0
    while idx < total:
        # a summary table starts with a line made only of '*'
        if not separator.match(outlines[idx]):
            idx += 1
            continue
        summary, idx = _parseTTreeSummary(outlines, idx)
        if summary:
            trees[summary["Name"]] = summary

    return trees
999 
1000 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found, as the list of keys
    leading to the differing (or missing) value; missing keys are recorded
    in to_check with value None.
    """
    fail_keys = []
    # optionally drop reference keys matching the 'ignore' pattern
    if ignore:
        ignored = re.compile(ignore)
        ref_keys = [key for key in reference if not ignored.match(key)]
    else:
        ref_keys = reference.keys()
    for key in ref_keys:
        if key not in to_check:
            # a missing key is a failure; flag it in the checked dict
            to_check[key] = None
            failed = True
        elif (type(reference[key]) is dict) and (type(to_check[key]) is dict):
            # both values are dicts: recurse, keeping the nested fail path
            failed = fail_keys = cmpTreesDicts(reference[key], to_check[key],
                                               ignore)
        else:
            failed = to_check[key] != reference[key]
        if failed:
            # prepend this key to the path and stop at the first difference
            fail_keys.insert(0, key)
            break
    return fail_keys
1032 
1033 
def getCmpFailingValues(reference, to_check, fail_path):
    """
    Follow fail_path (a list of keys, as returned by cmpTreesDicts) into
    both dictionaries and return (fail_path, reference_value, checked_value).
    """
    ref_val = reference
    chk_val = to_check
    for step in fail_path:
        ref_val = ref_val.get(step, None)
        chk_val = chk_val.get(step, None)
        if ref_val is None or chk_val is None:
            break  # one of the dictionaries is not deep enough
    return (fail_path, ref_val, chk_val)
1043 
1044 
# signature of the print-out of the histograms
# capture groups: (1) source/algorithm name, (2) number of booked histograms,
# (3) trailing "key=value" counters
h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histograms :\s+([\s\w=-]*)")
1048 
1049 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested informations and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    # split a table row into at most 3 ':'-separated columns, stripping the
    # '*' borders of the ROOT table
    def splitcols(l): return [f.strip() for f in l.strip("*\n").split(':', 2)]

    def parseblock(ll):
        # digest a 3-line tree/branch block into a dict
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # in-memory tree: no file size is reported
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            # only branch blocks carry a basket line
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])
        return r

    # need at least 3 data lines plus the closing '*' separator
    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i:i + 3])
        result["Branches"] = {}
        i += 4  # 3 parsed lines + 1 separator line
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            branch = parseblock(lines[i:i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 4

    return (result, i)
1099 
1100 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.
    Returns a tuple (summaries dict, position of the first line after the
    summary block).
    """
    global h_count_re
    h_table_head = re.compile(
        r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    # the tail of the header line is a sequence of "key=value" counters
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            t = t.replace(" profile", "Prof")
            pos += 1
            # peek at the next line to detect which of the two formats follows
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # one-line-per-histogram short format
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [x.strip()
                              for x in h_short_summ.search(lines[pos]).groups()]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError(
                    "Cannot understand line %d: '%s'" % (pos, l))
            if not d in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1166 
1167 
1169  """
1170  Scan stdout to find ROOT TTree summaries and digest them.
1171  """
1172  outlines = stdout.splitlines()
1173  nlines = len(outlines) - 1
1174  summaries = {}
1175  global h_count_re
1176 
1177  pos = 0
1178  while pos < nlines:
1179  summ = {}
1180  # find first line of block:
1181  match = h_count_re.search(outlines[pos])
1182  while pos < nlines and not match:
1183  pos += 1
1184  match = h_count_re.search(outlines[pos])
1185  if match:
1186  summ, pos = parseHistosSummary(outlines, pos)
1187  summaries.update(summ)
1188  return summaries
1189 
1190 
def PlatformIsNotSupported(self, context, result):
    """
    Return True if the current platform matches one of the patterns in
    unsupported_platforms, marking the result as UNTESTED; False otherwise.
    """
    current = GetPlatform(self)
    # build the list of non-empty, stripped patterns to test against
    patterns = [str(p).strip() for p in unsupported_platforms]
    checks = [re.compile(p) for p in patterns if p]
    for check in checks:
        if check.search(current):
            result.SetOutcome(result.UNTESTED)
            result[result.CAUSE] = 'Platform not supported.'
            return True
    return False
1201 
1202 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH
    (checked in that order of precedence), or the string "None" if none of
    them is set.
    """
    # check architecture name in the supported environment variables;
    # the first one found wins
    for var in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if var in os.environ:
            return os.environ[var]
    return "None"
1216 
1217 
def isWinPlatform(self):
    """
    Tell whether the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    current = GetPlatform(self)
    # accept both the old ("win32...") and the new ("...-winxp-...") formats
    return current.startswith("win") or "winxp" in current
def dumpProcs(name)
Definition: BaseTest.py:34
def GetPlatform(self)
Definition: BaseTest.py:1203
def PlatformIsNotSupported(self, context, result)
Definition: BaseTest.py:1191
def __init__(self, start, end)
Definition: BaseTest.py:744
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:443
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1001
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:262
def read(f, regex='.*', skipevents=0)
Definition: hivetimeline.py:22
def __processLine__(self, line)
Definition: BaseTest.py:801
def findHistosSummaries(stdout)
Definition: BaseTest.py:1168
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1050
struct GAUDI_API map
Parametrisation class for map-like implementation.
def __call__(self, stdout, result)
Definition: BaseTest.py:950
def __processLine__(self, line)
Definition: BaseTest.py:733
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:761
decltype(auto) range(Args &&...args)
Zips multiple containers together to form a single range.
def __init__(self, signature)
Definition: BaseTest.py:797
def sanitize_for_xml(data)
Definition: BaseTest.py:17
def isWinPlatform(self)
Definition: BaseTest.py:1218
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1034
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:728
def __setitem__(self, key, value)
Definition: BaseTest.py:623
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:616
def which(executable)
Definition: BaseTest.py:570
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1101
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:504
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:269
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:402
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
Definition: BaseTest.py:944
def __getitem__(self, key)
Definition: BaseTest.py:619
def kill_tree(ppid, sig)
Definition: BaseTest.py:43
def findTTreeSummaries(stdout)
Definition: BaseTest.py:979
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:641
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:553
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:360
def Quote(self, string)
Definition: BaseTest.py:628