The Gaudi Framework  v32r2 (46d42edc)
BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 import six
17 
18 if sys.version_info < (3, 5):
19  # backport of 'backslashreplace' handling of UnicodeDecodeError
20  # to Python < 3.5
21  from codecs import register_error, backslashreplace_errors
22 
23  def _new_backslashreplace_errors(exc):
24  if isinstance(exc, UnicodeDecodeError):
25  code = hex(ord(exc.object[exc.start]))
26  return (u'\\' + code[1:], exc.start + 1)
27  else:
28  return backslashreplace_errors(exc)
29 
30  register_error('backslashreplace', _new_backslashreplace_errors)
31  del register_error
32  del backslashreplace_errors
33  del _new_backslashreplace_errors
34 
35 
36 def sanitize_for_xml(data):
37  '''
38  Take a string with invalid ASCII/UTF characters and quote them so that the
39  string can be used in an XML text.
40 
41  >>> sanitize_for_xml('this is \x1b')
42  'this is [NON-XML-CHAR-0x1B]'
43  '''
44  bad_chars = re.compile(
45  u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
46 
47  def quote(match):
48  'helper function'
49  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
50 
51  return bad_chars.sub(quote, data)
52 
53 
54 def dumpProcs(name):
55  '''helper to debug GAUDI-1084, dump the list of processes'''
56  from getpass import getuser
57  if 'WORKSPACE' in os.environ:
58  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
59  with open(os.path.join(os.environ['WORKSPACE'], name), 'wb') as f:
60  f.write(p.communicate()[0])
61 
62 
63 def kill_tree(ppid, sig):
64  '''
65  Send a signal to a process and all its child processes (starting from the
66  leaves).
67  '''
68  log = logging.getLogger('kill_tree')
69  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
70  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
71  children = map(int, get_children.communicate()[0].split())
72  for child in children:
73  kill_tree(child, sig)
74  try:
75  log.debug('killing process %d', ppid)
76  os.kill(ppid, sig)
77  except OSError as err:
78  if err.errno != 3: # No such process
79  raise
80  log.debug('no such process %d', ppid)
81 
82 
83 # -------------------------------------------------------------------------#
84 
85 
86 class BaseTest(object):
87 
88  _common_tmpdir = None
89 
90  def __init__(self):
91  self.program = ''
92  self.args = []
93  self.reference = ''
94  self.error_reference = ''
95  self.options = ''
96  self.stderr = ''
97  self.timeout = 600
98  self.exit_code = None
99  self.environment = None
100  self.unsupported_platforms = []
101  self.signal = None
102  self.workdir = os.curdir
103  self.use_temp_dir = False
104  # Variables not for users
105  self.status = None
106  self.name = ''
107  self.causes = []
108  self.result = Result(self)
109  self.returnedCode = 0
110  self.out = ''
111  self.err = ''
112  self.proc = None
113  self.stack_trace = None
114  self.basedir = os.getcwd()
115 
116  def run(self):
117  logging.debug('running test %s', self.name)
118 
119  if self.options:
120  if re.search(
121  r'from\s+Gaudi.Configuration\s+import\s+\*|'
122  'from\s+Configurables\s+import', self.options):
123  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
124  else:
125  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
126  optionFile.file.write(self.options.encode('utf-8'))
127  optionFile.seek(0)
128  self.args.append(RationalizePath(optionFile.name))
129 
130  # If not specified, setting the environment
131  if self.environment is None:
132  self.environment = os.environ
133  else:
134  self.environment = dict(
135  list(self.environment.items()) + list(os.environ.items()))
136 
137  platform_id = (os.environ.get('BINARY_TAG')
138  or os.environ.get('CMTCONFIG') or platform.platform())
139  # If at least one regex matches we skip the test.
140  skip_test = bool([
141  None for prex in self.unsupported_platforms
142  if re.search(prex, platform_id)
143  ])
144 
145  if not skip_test:
146  # handle working/temporary directory options
147  workdir = self.workdir
148  if self.use_temp_dir:
149  if self._common_tmpdir:
150  workdir = self._common_tmpdir
151  else:
152  workdir = tempfile.mkdtemp()
153 
154  # prepare the command to execute
155  prog = ''
156  if self.program != '':
157  prog = self.program
158  elif "GAUDIEXE" in os.environ:
159  prog = os.environ["GAUDIEXE"]
160  else:
161  prog = "Gaudi.exe"
162 
163  dummy, prog_ext = os.path.splitext(prog)
164  if prog_ext not in [".exe", ".py", ".bat"]:
165  prog += ".exe"
166  prog_ext = ".exe"
167 
168  prog = which(prog) or prog
169 
170  args = list(map(RationalizePath, self.args))
171 
172  if prog_ext == ".py":
173  params = ['python', RationalizePath(prog)] + args
174  else:
175  params = [RationalizePath(prog)] + args
176 
177  validatorRes = Result({
178  'CAUSE': None,
179  'EXCEPTION': None,
180  'RESOURCE': None,
181  'TARGET': None,
182  'TRACEBACK': None,
183  'START_TIME': None,
184  'END_TIME': None,
185  'TIMEOUT_DETAIL': None
186  })
187  self.result = validatorRes
188 
189  # we need to switch directory because the validator expects to run
190  # in the same dir as the program
191  os.chdir(workdir)
192 
193  # launching test in a different thread to handle timeout exception
194  def target():
195  logging.debug('executing %r in %s', params, workdir)
196  self.proc = Popen(
197  params, stdout=PIPE, stderr=PIPE, env=self.environment)
198  logging.debug('(pid: %d)', self.proc.pid)
199  out, err = self.proc.communicate()
200  self.out = out.decode('utf-8', errors='backslashreplace')
201  self.err = err.decode('utf-8', errors='backslashreplace')
202 
203  thread = threading.Thread(target=target)
204  thread.start()
205  # catching timeout
206  thread.join(self.timeout)
207 
208  if thread.is_alive():
209  logging.debug('time out in test %s (pid %d)', self.name,
210  self.proc.pid)
211  # get the stack trace of the stuck process
212  cmd = [
213  'gdb', '--pid',
214  str(self.proc.pid), '--batch',
215  '--eval-command=thread apply all backtrace'
216  ]
217  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
218  self.stack_trace = gdb.communicate()[0].decode(
219  'utf-8', errors='backslashreplace')
220 
221  kill_tree(self.proc.pid, signal.SIGTERM)
222  thread.join(60)
223  if thread.is_alive():
224  kill_tree(self.proc.pid, signal.SIGKILL)
225  self.causes.append('timeout')
226  else:
227  logging.debug('completed test %s', self.name)
228 
229  # Getting the error code
230  logging.debug('returnedCode = %s', self.proc.returncode)
231  self.returnedCode = self.proc.returncode
232 
233  logging.debug('validating test...')
234  self.result, self.causes = self.ValidateOutput(
235  stdout=self.out, stderr=self.err, result=validatorRes)
236 
237  # remove the temporary directory if we created it
238  if self.use_temp_dir and not self._common_tmpdir:
239  shutil.rmtree(workdir, True)
240 
241  os.chdir(self.basedir)
242 
243  # handle application exit code
244  if self.signal is not None:
245  if int(self.returnedCode) != -int(self.signal):
246  self.causes.append('exit code')
247 
248  elif self.exit_code is not None:
249  if int(self.returnedCode) != int(self.exit_code):
250  self.causes.append('exit code')
251 
252  elif self.returnedCode != 0:
253  self.causes.append("exit code")
254 
255  if self.causes:
256  self.status = "failed"
257  else:
258  self.status = "passed"
259 
260  else:
261  self.status = "skipped"
262 
263  logging.debug('%s: %s', self.name, self.status)
264  field_mapping = {
265  'Exit Code': 'returnedCode',
266  'stderr': 'err',
267  'Arguments': 'args',
268  'Environment': 'environment',
269  'Status': 'status',
270  'stdout': 'out',
271  'Program Name': 'program',
272  'Name': 'name',
273  'Validator': 'validator',
274  'Output Reference File': 'reference',
275  'Error Reference File': 'error_reference',
276  'Causes': 'causes',
277  # 'Validator Result': 'result.annotations',
278  'Unsupported Platforms': 'unsupported_platforms',
279  'Stack Trace': 'stack_trace'
280  }
281  resultDict = [(key, getattr(self, attr))
282  for key, attr in field_mapping.items()
283  if getattr(self, attr)]
284  resultDict.append(('Working Directory',
285  RationalizePath(
286  os.path.join(os.getcwd(), self.workdir))))
287  # print(dict(resultDict).keys())
288  resultDict.extend(self.result.annotations.items())
289  # print(self.result.annotations.keys())
290  return dict(resultDict)
291 
292  # -------------------------------------------------#
293  # ----------------Validating tool------------------#
294  # -------------------------------------------------#
295 
296  def ValidateOutput(self, stdout, stderr, result):
297  if not self.stderr:
298  self.validateWithReference(stdout, stderr, result, self.causes)
299  elif stderr.strip() != self.stderr.strip():
300  self.causes.append('standard error')
301  return result, self.causes
302 
303  def findReferenceBlock(self,
304  reference=None,
305  stdout=None,
306  result=None,
307  causes=None,
308  signature_offset=0,
309  signature=None,
310  id=None):
311  """
312  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed via the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows one to distinguish between different calls to this function in the same validation code.
313  """
314 
315  if reference is None:
316  reference = self.reference
317  if stdout is None:
318  stdout = self.out
319  if result is None:
320  result = self.result
321  if causes is None:
322  causes = self.causes
323 
324  reflines = list(
325  filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
326  if not reflines:
327  raise RuntimeError("Empty (or null) reference")
328  # the same on standard output
329  outlines = list(
330  filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
331 
332  res_field = "GaudiTest.RefBlock"
333  if id:
334  res_field += "_%s" % id
335 
336  if signature is None:
337  if signature_offset < 0:
338  signature_offset = len(reference) + signature_offset
339  signature = reflines[signature_offset]
340  # find the reference block in the output file
341  try:
342  pos = outlines.index(signature)
343  outlines = outlines[pos - signature_offset:pos + len(reflines) -
344  signature_offset]
345  if reflines != outlines:
346  msg = "standard output"
347  # I do not want 2 messages in causes if the function is called
348  # twice
349  if not msg in causes:
350  causes.append(msg)
351  result[res_field + ".observed"] = result.Quote(
352  "\n".join(outlines))
353  except ValueError:
354  causes.append("missing signature")
355  result[res_field + ".signature"] = result.Quote(signature)
356  if len(reflines) > 1 or signature != reflines[0]:
357  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
358  return causes
359 
360  def countErrorLines(self,
361  expected={
362  'ERROR': 0,
363  'FATAL': 0
364  },
365  stdout=None,
366  result=None,
367  causes=None):
368  """
369  Count the number of messages with required severity (by default ERROR and FATAL)
370  and check if their numbers match the expected ones (0 by default).
371  The dictionary "expected" can be used to tune the number of errors and fatals
372  allowed, or to limit the number of expected warnings etc.
373  """
374 
375  if stdout is None:
376  stdout = self.out
377  if result is None:
378  result = self.result
379  if causes is None:
380  causes = self.causes
381 
382  # prepare the dictionary to record the extracted lines
383  errors = {}
384  for sev in expected:
385  errors[sev] = []
386 
387  outlines = stdout.splitlines()
388  from math import log10
389  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
390 
391  linecount = 0
392  for l in outlines:
393  linecount += 1
394  words = l.split()
395  if len(words) >= 2 and words[1] in errors:
396  errors[words[1]].append(fmt % (linecount, l.rstrip()))
397 
398  for e in errors:
399  if len(errors[e]) != expected[e]:
400  causes.append('%s(%d)' % (e, len(errors[e])))
401  result["GaudiTest.lines.%s" % e] = result.Quote('\n'.join(
402  errors[e]))
403  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
404  str(expected[e]))
405 
406  return causes
407 
408  def CheckTTreesSummaries(self,
409  stdout=None,
410  result=None,
411  causes=None,
412  trees_dict=None,
413  ignore=r"Basket|.*size|Compression"):
414  """
415  Compare the TTree summaries in stdout with the ones in trees_dict or in
416  the reference file. By default ignore the size, compression and basket
417  fields.
418  The presence of TTree summaries when none is expected is not a failure.
419  """
420  if stdout is None:
421  stdout = self.out
422  if result is None:
423  result = self.result
424  if causes is None:
425  causes = self.causes
426  if trees_dict is None:
427  lreference = self._expandReferenceFileName(self.reference)
428  # call the validator if the file exists
429  if lreference and os.path.isfile(lreference):
430  trees_dict = findTTreeSummaries(open(lreference).read())
431  else:
432  trees_dict = {}
433 
434  from pprint import PrettyPrinter
435  pp = PrettyPrinter()
436  if trees_dict:
437  result["GaudiTest.TTrees.expected"] = result.Quote(
438  pp.pformat(trees_dict))
439  if ignore:
440  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
441 
442  trees = findTTreeSummaries(stdout)
443  failed = cmpTreesDicts(trees_dict, trees, ignore)
444  if failed:
445  causes.append("trees summaries")
446  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees,
447  failed)
448  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
449  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
450 
451  return causes
452 
453  def CheckHistosSummaries(self,
454  stdout=None,
455  result=None,
456  causes=None,
457  dict=None,
458  ignore=None):
459  """
460  Compare the histogram summaries in stdout with the ones in dict or in
461  the reference file. By default no field is ignored (the 'ignore'
462  argument is None).
463  The presence of histogram summaries when none is expected is not a failure.
464  """
465  if stdout is None:
466  stdout = self.out
467  if result is None:
468  result = self.result
469  if causes is None:
470  causes = self.causes
471 
472  if dict is None:
473  lreference = self._expandReferenceFileName(self.reference)
474  # call the validator if the file exists
475  if lreference and os.path.isfile(lreference):
476  dict = findHistosSummaries(open(lreference).read())
477  else:
478  dict = {}
479 
480  from pprint import PrettyPrinter
481  pp = PrettyPrinter()
482  if dict:
483  result["GaudiTest.Histos.expected"] = result.Quote(
484  pp.pformat(dict))
485  if ignore:
486  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
487 
488  histos = findHistosSummaries(stdout)
489  failed = cmpTreesDicts(dict, histos, ignore)
490  if failed:
491  causes.append("histos summaries")
492  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
493  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
494  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
495 
496  return causes
497 
498  def validateWithReference(self,
499  stdout=None,
500  stderr=None,
501  result=None,
502  causes=None,
503  preproc=None):
504  '''
505  Default validation action: compare standard output and error to the
506  reference files.
507  '''
508 
509  if stdout is None:
510  stdout = self.out
511  if stderr is None:
512  stderr = self.err
513  if result is None:
514  result = self.result
515  if causes is None:
516  causes = self.causes
517 
518  # set the default output preprocessor
519  if preproc is None:
520  preproc = normalizeExamples
521  # check standard output
522  lreference = self._expandReferenceFileName(self.reference)
523  # call the validator if the file exists
524  if lreference and os.path.isfile(lreference):
525  causes += ReferenceFileValidator(
526  lreference, "standard output", "Output Diff",
527  preproc=preproc)(stdout, result)
528  elif lreference:
529  causes += ["missing reference file"]
530  # Compare TTree summaries
531  causes = self.CheckTTreesSummaries(stdout, result, causes)
532  causes = self.CheckHistosSummaries(stdout, result, causes)
533  if causes and lreference: # Write a new reference file for stdout
534  try:
535  cnt = 0
536  newrefname = '.'.join([lreference, 'new'])
537  while os.path.exists(newrefname):
538  cnt += 1
539  newrefname = '.'.join([lreference, '~%d~' % cnt, 'new'])
540  newref = open(newrefname, "w")
541  # sanitize newlines
542  for l in stdout.splitlines():
543  newref.write(l.rstrip() + '\n')
544  del newref # flush and close
545  result['New Output Reference File'] = os.path.relpath(
546  newrefname, self.basedir)
547  except IOError:
548  # Ignore IO errors when trying to update reference files
549  # because we may be in a read-only filesystem
550  pass
551 
552  # check standard error
553  lreference = self._expandReferenceFileName(self.error_reference)
554  # call the validator if we have a file to use
555  if lreference:
556  if os.path.isfile(lreference):
557  newcauses = ReferenceFileValidator(
558  lreference,
559  "standard error",
560  "Error Diff",
561  preproc=preproc)(stderr, result)
562  else:
563  newcauses = ["missing error reference file"]
564  causes += newcauses
565  if newcauses and lreference: # Write a new reference file for stderr
566  cnt = 0
567  newrefname = '.'.join([lreference, 'new'])
568  while os.path.exists(newrefname):
569  cnt += 1
570  newrefname = '.'.join([lreference, '~%d~' % cnt, 'new'])
571  newref = open(newrefname, "w")
572  # sanitize newlines
573  for l in stderr.splitlines():
574  newref.write(l.rstrip() + '\n')
575  del newref # flush and close
576  result['New Error Reference File'] = os.path.relpath(
577  newrefname, self.basedir)
578  else:
579  causes += BasicOutputValidator(lreference, "standard error",
580  "ExecTest.expected_stderr")(stderr,
581  result)
582  return causes
583 
584  def _expandReferenceFileName(self, reffile):
585  # if no file is passed, do nothing
586  if not reffile:
587  return ""
588 
589  # function to split an extension in constituent parts
590  def platformSplit(p):
591  import re
592  delim = re.compile('-' in p and r"[-+]" or r"_")
593  return set(delim.split(p))
594 
595  reference = os.path.normpath(
596  os.path.join(self.basedir, os.path.expandvars(reffile)))
597 
598  # old-style platform-specific reference name
599  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
600  if os.path.isfile(spec_ref):
601  reference = spec_ref
602  else: # look for new-style platform specific reference files:
603  # get all the files whose name start with the reference filename
604  dirname, basename = os.path.split(reference)
605  if not dirname:
606  dirname = '.'
607  head = basename + "."
608  head_len = len(head)
609  platform = platformSplit(GetPlatform(self))
610  if 'do0' in platform:
611  platform.add('dbg')
612  candidates = []
613  for f in os.listdir(dirname):
614  if f.startswith(head):
615  req_plat = platformSplit(f[head_len:])
616  if platform.issuperset(req_plat):
617  candidates.append((len(req_plat), f))
618  if candidates: # take the one with highest matching
619  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
620  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
621  candidates.sort()
622  reference = os.path.join(dirname, candidates[-1][1])
623  return reference
624 
625 
626 # ======= GAUDI TOOLS =======
627 
628 import shutil
629 import string
630 import difflib
631 import calendar
632 
633 try:
634  from GaudiKernel import ROOT6WorkAroundEnabled
635 except ImportError:
636 
637  def ROOT6WorkAroundEnabled(id=None):
638  # dummy implementation
639  return False
640 
641 
642 # --------------------------------- TOOLS ---------------------------------#
643 
644 
646  """
647  Function used to normalize the used path
648  """
649  newPath = os.path.normpath(os.path.expandvars(p))
650  if os.path.exists(newPath):
651  p = os.path.realpath(newPath)
652  return p
653 
654 
655 def which(executable):
656  """
657  Locates an executable in the executables path ($PATH) and returns the full
658  path to it. An application is looked for with or without the '.exe' suffix.
659  If the executable cannot be found, None is returned
660  """
661  if os.path.isabs(executable):
662  if not os.path.exists(executable):
663  if executable.endswith('.exe'):
664  if os.path.exists(executable[:-4]):
665  return executable[:-4]
666  else:
667  head, executable = os.path.split(executable)
668  else:
669  return executable
670  for d in os.environ.get("PATH").split(os.pathsep):
671  fullpath = os.path.join(d, executable)
672  if os.path.exists(fullpath):
673  return fullpath
674  if executable.endswith('.exe'):
675  return which(executable[:-4])
676  return None
677 
678 
679 # -------------------------------------------------------------------------#
680 # ----------------------------- Result Class ------------------------------#
681 # -------------------------------------------------------------------------#
682 import types
683 
684 
685 class Result:
686 
687  PASS = 'PASS'
688  FAIL = 'FAIL'
689  ERROR = 'ERROR'
690  UNTESTED = 'UNTESTED'
691 
692  EXCEPTION = ""
693  RESOURCE = ""
694  TARGET = ""
695  TRACEBACK = ""
696  START_TIME = ""
697  END_TIME = ""
698  TIMEOUT_DETAIL = ""
699 
700  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
701  self.annotations = annotations.copy()
702 
703  def __getitem__(self, key):
704  assert isinstance(key, six.string_types)
705  return self.annotations[key]
706 
707  def __setitem__(self, key, value):
708  assert isinstance(key, six.string_types)
709  assert isinstance(
710  value, six.string_types), '{!r} is not a string'.format(value)
711  self.annotations[key] = value
712 
713  def Quote(self, string):
714  return string
715 
716 
717 # -------------------------------------------------------------------------#
718 # --------------------------- Validator Classes ---------------------------#
719 # -------------------------------------------------------------------------#
720 
721 # Basic implementation of an option validator for Gaudi test. This
722 # implementation is based on the standard (LCG) validation functions used
723 # in QMTest.
724 
725 
726 class BasicOutputValidator:
727  def __init__(self, ref, cause, result_key):
728  self.ref = ref
729  self.cause = cause
730  self.result_key = result_key
731 
732  def __call__(self, out, result):
733  """Validate the output of the program.
734  'stdout' -- A string containing the data written to the standard output
735  stream.
736  'stderr' -- A string containing the data written to the standard error
737  stream.
738  'result' -- A 'Result' object. It may be used to annotate
739  the outcome according to the content of stderr.
740  returns -- A list of strings giving causes of failure."""
741 
742  causes = []
743  # Check the output
744  if not self.__CompareText(out, self.ref):
745  causes.append(self.cause)
746  result[self.result_key] = result.Quote(self.ref)
747 
748  return causes
749 
750  def __CompareText(self, s1, s2):
751  """Compare 's1' and 's2', ignoring line endings.
752  's1' -- A string.
753  's2' -- A string.
754  returns -- True if 's1' and 's2' are the same, ignoring
755  differences in line endings."""
756  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
757  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
758  # can fix them
759  to_ignore = re.compile(
760  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*'
761  )
762 
763  def keep_line(l):
764  return not to_ignore.match(l)
765 
766  return list(filter(keep_line, s1.splitlines())) == list(
767  filter(keep_line, s2.splitlines()))
768  else:
769  return s1.splitlines() == s2.splitlines()
770 
771 
772 # ------------------------ Preprocessor elements ------------------------#
774  """ Base class for a callable that takes a file and returns a modified
775  version of it."""
776 
777  def __processLine__(self, line):
778  return line
779 
780  def __processFile__(self, lines):
781  output = []
782  for l in lines:
783  l = self.__processLine__(l)
784  if l:
785  output.append(l)
786  return output
787 
788  def __call__(self, input):
789  if not isinstance(input, six.string_types):
790  lines = input
791  mergeback = False
792  else:
793  lines = input.splitlines()
794  mergeback = True
795  output = self.__processFile__(lines)
796  if mergeback:
797  output = '\n'.join(output)
798  return output
799 
800  def __add__(self, rhs):
801  return FilePreprocessorSequence([self, rhs])
802 
803 
804 class FilePreprocessorSequence(FilePreprocessor):
805  def __init__(self, members=[]):
806  self.members = members
807 
808  def __add__(self, rhs):
809  return FilePreprocessorSequence(self.members + [rhs])
810 
811  def __call__(self, input):
812  output = input
813  for pp in self.members:
814  output = pp(output)
815  return output
816 
817 
818 class LineSkipper(FilePreprocessor):
819  def __init__(self, strings=[], regexps=[]):
820  import re
821  self.strings = strings
822  self.regexps = list(map(re.compile, regexps))
823 
824  def __processLine__(self, line):
825  for s in self.strings:
826  if line.find(s) >= 0:
827  return None
828  for r in self.regexps:
829  if r.search(line):
830  return None
831  return line
832 
833 
834 class BlockSkipper(FilePreprocessor):
835  def __init__(self, start, end):
836  self.start = start
837  self.end = end
838  self._skipping = False
839 
840  def __processLine__(self, line):
841  if self.start in line:
842  self._skipping = True
843  return None
844  elif self.end in line:
845  self._skipping = False
846  elif self._skipping:
847  return None
848  return line
849 
850 
851 class RegexpReplacer(FilePreprocessor):
852  def __init__(self, orig, repl="", when=None):
853  if when:
854  when = re.compile(when)
855  self._operations = [(when, re.compile(orig), repl)]
856 
857  def __add__(self, rhs):
858  if isinstance(rhs, RegexpReplacer):
859  res = RegexpReplacer("", "", None)
860  res._operations = self._operations + rhs._operations
861  else:
862  res = FilePreprocessor.__add__(self, rhs)
863  return res
864 
865  def __processLine__(self, line):
866  for w, o, r in self._operations:
867  if w is None or w.search(line):
868  line = o.sub(r, line)
869  return line
870 
871 
872 # Common preprocessors
873 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
874 normalizeDate = RegexpReplacer(
875  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
876  "00:00:00 1970-01-01")
877 normalizeEOL = FilePreprocessor()
878 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
879 
880 skipEmptyLines = FilePreprocessor()
881 # FIXME: that's ugly
882 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
883 
884 # Special preprocessor sorting the list of strings (whitespace separated)
885 # that follow a signature on a single line
886 
887 
888 class LineSorter(FilePreprocessor):
889  def __init__(self, signature):
890  self.signature = signature
891  self.siglen = len(signature)
892 
893  def __processLine__(self, line):
894  pos = line.find(self.signature)
895  if pos >= 0:
896  lst = line[(pos + self.siglen):].split()
897  line = line[:(pos + self.siglen)]
898  lst.sort()
899  line += " ".join(lst)
900  return line
901 
902 
903 class SortGroupOfLines(FilePreprocessor):
904  '''
905  Sort group of lines matching a regular expression
906  '''
907 
908  def __init__(self, exp):
909  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
910 
911  def __processFile__(self, lines):
912  match = self.exp.match
913  output = []
914  group = []
915  for l in lines:
916  if match(l):
917  group.append(l)
918  else:
919  if group:
920  group.sort()
921  output.extend(group)
922  group = []
923  output.append(l)
924  return output
925 
926 
927 # Preprocessors for GaudiExamples
928 normalizeExamples = maskPointers + normalizeDate
929 for w, o, r in [
930  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
931  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
932  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
933  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
934  ("^JobOptionsSvc.*options successfully read in from",
935  r"read in from .*[/\\]([^/\\]*)$",
936  r"file \1"), # normalize path to options
937  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
938  (None,
939  r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
940  "00000000-0000-0000-0000-000000000000"),
941  # Absorb a change in ServiceLocatorHelper
942  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
943  "ServiceLocatorHelper::service"),
944  # Remove the leading 0 in Windows' exponential format
945  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
946  # Output line changed in Gaudi v24
947  (None, r'Service reference count check:',
948  r'Looping over all active services...'),
949  # Ignore count of declared properties (anyway they are all printed)
950  (None,
951  r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
952  r"\1NN"),
953  ('ApplicationMgr', r'(declareMultiSvcType|addMultiSvc): ', ''),
954 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
955  normalizeExamples += RegexpReplacer(o, r, w)
956 
957 lineSkipper = LineSkipper(
958  [
959  "//GP:",
960  "JobOptionsSvc INFO # ",
961  "JobOptionsSvc WARNING # ",
962  "Time User",
963  "Welcome to",
964  "This machine has a speed",
965  "TIME:",
966  "running on",
967  "ToolSvc.Sequenc... INFO",
968  "DataListenerSvc INFO XML written to file:",
969  "[INFO]",
970  "[WARNING]",
971  "DEBUG No writable file catalog found which contains FID:",
972  "DEBUG Service base class initialized successfully",
973  # changed between v20 and v21
974  "DEBUG Incident timing:",
975  # introduced with patch #3487
976  # changed the level of the message from INFO to
977  # DEBUG
978  "INFO 'CnvServices':[",
979  # message removed because could be printed in constructor
980  "DEBUG 'CnvServices':[",
981  # The signal handler complains about SIGXCPU not
982  # defined on some platforms
983  'SIGXCPU',
984  ],
985  regexps=[
986  r"^JobOptionsSvc INFO *$",
987  r"^# ", # Ignore python comments
988  # skip the message reporting the version of the root file
989  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
990  r"File '.*.xml' does not exist",
991  r"INFO Refer to dataset .* by its file ID:",
992  r"INFO Referring to dataset .* by its file ID:",
993  r"INFO Disconnect from dataset",
994  r"INFO Disconnected from dataset",
995  r"INFO Disconnected data IO:",
996  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
997  # I want to ignore the header of the unchecked StatusCode report
998  r"^StatusCodeSvc.*listing all unchecked return codes:",
999  r"^StatusCodeSvc\s*INFO\s*$",
1000  r"Num\s*\|\s*Function\s*\|\s*Source Library",
1001  r"^[-+]*\s*$",
1002  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
1003  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1004  # Hide unchecked StatusCodes from dictionaries
1005  r"^ +[0-9]+ \|.*ROOT",
1006  r"^ +[0-9]+ \|.*\|.*Dict",
1007  # Hide success StatusCodeSvc message
1008  r"StatusCodeSvc.*all StatusCode instances where checked",
1009  # Hide EventLoopMgr total timing report
1010  r"EventLoopMgr.*---> Loop Finished",
1011  r"HiveSlimEventLo.*---> Loop Finished",
1012  # Remove ROOT TTree summary table, which changes from one version to the
1013  # other
1014  r"^\*.*\*$",
1015  # Remove Histos Summaries
1016  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1017  r"^ \|",
1018  r"^ ID=",
1019  # Ignore added/removed properties
1020  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1021  # these were missing in tools
1022  r"Property(.*)'AuditRe(start|initialize)':",
1023  r"Property(.*)'IsIOBound':",
1024  # removed with gaudi/Gaudi!273
1025  r"Property(.*)'ErrorCount(er)?':",
1026  # added with gaudi/Gaudi!306
1027  r"Property(.*)'Sequential':",
1028  # added with gaudi/Gaudi!314
1029  r"Property(.*)'FilterCircularDependencies':",
1030  # removed with gaudi/Gaudi!316
1031  r"Property(.*)'IsClonable':",
1032  # ignore uninteresting/obsolete messages
1033  r"Property update for OutputLevel : new value =",
1034  r"EventLoopMgr\s*DEBUG Creating OutputStream",
1035  ])
1036 
1037 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
1038  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
1039  # fix them
1040  lineSkipper += LineSkipper(regexps=[
1041  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
1042  ])
1043 
1044 normalizeExamples = (
1045  lineSkipper + normalizeExamples + skipEmptyLines + normalizeEOL +
1046  LineSorter("Services to release : ") +
1047  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
1048 
1049 # --------------------- Validation functions/classes ---------------------#
1050 
1051 
1052 class ReferenceFileValidator:
1053  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1054  self.reffile = os.path.expandvars(reffile)
1055  self.cause = cause
1056  self.result_key = result_key
1057  self.preproc = preproc
1058 
1059  def __call__(self, stdout, result):
1060  causes = []
1061  if os.path.isfile(self.reffile):
1062  orig = open(self.reffile).readlines()
1063  if self.preproc:
1064  orig = self.preproc(orig)
1065  result[self.result_key + '.preproc.orig'] = \
1066  result.Quote('\n'.join(map(str.strip, orig)))
1067  else:
1068  orig = []
1069  new = stdout.splitlines()
1070  if self.preproc:
1071  new = self.preproc(new)
1072 
1073  diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
1074  filterdiffs = list(
1075  map(lambda x: x.strip(), filter(lambda x: x[0] != " ", diffs)))
1076  if filterdiffs:
1077  result[self.result_key] = result.Quote("\n".join(filterdiffs))
1078  result[self.result_key] += result.Quote("""
1079  Legend:
1080  -) reference file
1081  +) standard output of the test""")
1082  result[self.result_key + '.preproc.new'] = \
1083  result.Quote('\n'.join(map(str.strip, new)))
1084  causes.append(self.cause)
1085  return causes
1086 
1087 
1089  """
1090  Scan stdout to find ROOT TTree summaries and digest them.
1091  """
1092  stars = re.compile(r"^\*+$")
1093  outlines = stdout.splitlines()
1094  nlines = len(outlines)
1095  trees = {}
1096 
1097  i = 0
1098  while i < nlines: # loop over the output
1099  # look for the next line made only of '*' (start of a TTree summary)
1100  while i < nlines and not stars.match(outlines[i]):
1101  i += 1
1102  if i < nlines:
1103  tree, i = _parseTTreeSummary(outlines, i)
1104  if tree:
1105  trees[tree["Name"]] = tree
1106 
1107  return trees
1108 
1109 
1110 def cmpTreesDicts(reference, to_check, ignore=None):
1111  """
1112  Check that all the keys in reference are in to_check too, with the same value.
1113  If the value is a dict, the function is called recursively. to_check can
1114  contain more keys than reference, that will not be tested.
1115  The function returns at the first difference found.
1116  """
1117  fail_keys = []
1118  # filter the keys in the reference dictionary
1119  if ignore:
1120  ignore_re = re.compile(ignore)
1121  keys = [key for key in reference if not ignore_re.match(key)]
1122  else:
1123  keys = reference.keys()
1124  # loop over the keys (not ignored) in the reference dictionary
1125  for k in keys:
1126  if k in to_check: # the key must be in the dictionary to_check
1127  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1128  # if both reference and to_check values are dictionaries,
1129  # recurse
1130  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k],
1131  ignore)
1132  else:
1133  # compare the two values
1134  failed = to_check[k] != reference[k]
1135  else: # handle missing keys in the dictionary to check (i.e. failure)
1136  to_check[k] = None
1137  failed = True
1138  if failed:
1139  fail_keys.insert(0, k)
1140  break # exit from the loop at the first failure
1141  return fail_keys # return the list of keys bringing to the different values
1142 
1143 
1144 def getCmpFailingValues(reference, to_check, fail_path):
1145  c = to_check
1146  r = reference
1147  for k in fail_path:
1148  c = c.get(k, None)
1149  r = r.get(k, None)
1150  if c is None or r is None:
1151  break # one of the dictionaries is not deep enough
1152  return (fail_path, r, c)
1153 
1154 
1155 # signature of the print-out of the histograms
1156 h_count_re = re.compile(
1157  r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1158 
1159 
1160 def _parseTTreeSummary(lines, pos):
1161  """
1162  Parse the TTree summary table in lines, starting from pos.
1163  Returns a tuple with the dictionary of the digested information and the
1164  position of the first line after the summary.
1165  """
1166  result = {}
1167  i = pos + 1 # first line is a sequence of '*'
1168  count = len(lines)
1169 
1170  def splitcols(l):
1171  return [f.strip() for f in l.strip("*\n").split(':', 2)]
1172 
1173  def parseblock(ll):
1174  r = {}
1175  cols = splitcols(ll[0])
1176  r["Name"], r["Title"] = cols[1:]
1177 
1178  cols = splitcols(ll[1])
1179  r["Entries"] = int(cols[1])
1180 
1181  sizes = cols[2].split()
1182  r["Total size"] = int(sizes[2])
1183  if sizes[-1] == "memory":
1184  r["File size"] = 0
1185  else:
1186  r["File size"] = int(sizes[-1])
1187 
1188  cols = splitcols(ll[2])
1189  sizes = cols[2].split()
1190  if cols[0] == "Baskets":
1191  r["Baskets"] = int(cols[1])
1192  r["Basket size"] = int(sizes[2])
1193  r["Compression"] = float(sizes[-1])
1194  return r
1195 
1196  if i < (count - 3) and lines[i].startswith("*Tree"):
1197  result = parseblock(lines[i:i + 3])
1198  result["Branches"] = {}
1199  i += 4
1200  while i < (count - 3) and lines[i].startswith("*Br"):
1201  if i < (count - 2) and lines[i].startswith("*Branch "):
1202  # skip branch header
1203  i += 3
1204  continue
1205  branch = parseblock(lines[i:i + 3])
1206  result["Branches"][branch["Name"]] = branch
1207  i += 4
1208 
1209  return (result, i)
1210 
1211 
1212 def parseHistosSummary(lines, pos):
1213  """
1214  Extract the histograms infos from the lines starting at pos.
1215  Returns the position of the first line after the summary block.
1216  """
1217  global h_count_re
1218  h_table_head = re.compile(
1219  r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1220  )
1221  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1222 
1223  nlines = len(lines)
1224 
1225  # decode header
1226  m = h_count_re.search(lines[pos])
1227  name = m.group(1).strip()
1228  total = int(m.group(2))
1229  header = {}
1230  for k, v in [x.split("=") for x in m.group(3).split()]:
1231  header[k] = int(v)
1232  pos += 1
1233  header["Total"] = total
1234 
1235  summ = {}
1236  while pos < nlines:
1237  m = h_table_head.search(lines[pos])
1238  if m:
1239  t, d = m.groups(1) # type and directory
1240  t = t.replace(" profile", "Prof")
1241  pos += 1
1242  if pos < nlines:
1243  l = lines[pos]
1244  else:
1245  l = ""
1246  cont = {}
1247  if l.startswith(" | ID"):
1248  # table format
1249  titles = [x.strip() for x in l.split("|")][1:]
1250  pos += 1
1251  while pos < nlines and lines[pos].startswith(" |"):
1252  l = lines[pos]
1253  values = [x.strip() for x in l.split("|")][1:]
1254  hcont = {}
1255  for i in range(len(titles)):
1256  hcont[titles[i]] = values[i]
1257  cont[hcont["ID"]] = hcont
1258  pos += 1
1259  elif l.startswith(" ID="):
1260  while pos < nlines and lines[pos].startswith(" ID="):
1261  values = [
1262  x.strip()
1263  for x in h_short_summ.search(lines[pos]).groups()
1264  ]
1265  cont[values[0]] = values
1266  pos += 1
1267  else: # not interpreted
1268  raise RuntimeError(
1269  "Cannot understand line %d: '%s'" % (pos, l))
1270  if not d in summ:
1271  summ[d] = {}
1272  summ[d][t] = cont
1273  summ[d]["header"] = header
1274  else:
1275  break
1276  if not summ:
1277  # If the full table is not present, we use only the header
1278  summ[name] = {"header": header}
1279  return summ, pos
1280 
1281 
1283  """
1284  Scan stdout to find the histogram summaries and digest them.
1285  """
1286  outlines = stdout.splitlines()
1287  nlines = len(outlines) - 1
1288  summaries = {}
1289  global h_count_re
1290 
1291  pos = 0
1292  while pos < nlines:
1293  summ = {}
1294  # find first line of block:
1295  match = h_count_re.search(outlines[pos])
1296  while pos < nlines and not match:
1297  pos += 1
1298  match = h_count_re.search(outlines[pos])
1299  if match:
1300  summ, pos = parseHistosSummary(outlines, pos)
1301  summaries.update(summ)
1302  return summaries
1303 
1304 
1305 def PlatformIsNotSupported(self, context, result):
1306  platform = GetPlatform(self)
1307  unsupported = [
1308  re.compile(x) for x in [str(y).strip() for y in self.unsupported_platforms]
1309  if x
1310  ]
1311  for p_re in unsupported:
1312  if p_re.search(platform):
1313  result.SetOutcome(result.UNTESTED)
1314  result[result.CAUSE] = 'Platform not supported.'
1315  return True
1316  return False
1317 
1318 
1319 def GetPlatform(self):
1320  """
1321  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1322  """
1323  arch = "None"
1324  # check architecture name
1325  if "BINARY_TAG" in os.environ:
1326  arch = os.environ["BINARY_TAG"]
1327  elif "CMTCONFIG" in os.environ:
1328  arch = os.environ["CMTCONFIG"]
1329  elif "SCRAM_ARCH" in os.environ:
1330  arch = os.environ["SCRAM_ARCH"]
1331  return arch
1332 
1333 
1334 def isWinPlatform(self):
1335  """
1336  Return True if the current platform is Windows.
1337 
1338  This function was needed because of the change in the CMTCONFIG format,
1339  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1340  """
1341  platform = GetPlatform(self)
1342  return "winxp" in platform or platform.startswith("win")