BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 def sanitize_for_xml(data):
17  '''
18  Take a string with invalid ASCII/UTF characters and quote them so that the
19  string can be used in an XML text.
20 
21  >>> sanitize_for_xml('this is \x1b')
22  'this is [NON-XML-CHAR-0x1B]'
23  '''
24  bad_chars = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
25  def quote(match):
26  'helper function'
27  return ''.join('[NON-XML-CHAR-0x%02X]' % ord(c) for c in match.group())
28  return bad_chars.sub(quote, data)
29 
30 def dumpProcs(name):
31  '''helper to debug GAUDI-1084, dump the list of processes'''
32  from getpass import getuser
33  if 'WORKSPACE' in os.environ:
34  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
35  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
36  f.write(p.communicate()[0])
37 
38 def kill_tree(ppid, sig):
39  '''
40  Send a signal to a process and all its child processes (starting from the
41  leaves).
42  '''
43  log = logging.getLogger('kill_tree')
44  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
45  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
46  children = map(int, get_children.communicate()[0].split())
47  for child in children:
48  kill_tree(child, sig)
49  try:
50  log.debug('killing process %d', ppid)
51  os.kill(ppid, sig)
52  except OSError, err:
53  if err.errno != 3: # No such process
54  raise
55  log.debug('no such process %d', ppid)
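# Illustrative usage (minimal sketch): kill_tree is applied to the process started
# for a test once it times out, signalling the leaves of the tree first, e.g.
#
#   proc = Popen(['my_test.exe'])           # hypothetical executable
#   kill_tree(proc.pid, signal.SIGTERM)     # ask the whole process tree to terminate
#   kill_tree(proc.pid, signal.SIGKILL)     # force it, if something is still alive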
56 
57 #-------------------------------------------------------------------------#
58 class BaseTest(object):
59 
60  _common_tmpdir = None
61 
62  def __init__(self):
63  self.program = ''
64  self.args = []
65  self.reference = ''
66  self.error_reference = ''
67  self.options = ''
68  self.stderr = ''
69  self.timeout = 600
70  self.exit_code = None
71  self.environment = None
72  self.unsupported_platforms = []
73  self.signal = None
74  self.workdir = os.curdir
75  self.use_temp_dir = False
76  #Variables not for users
77  self.status = None
78  self.name = ''
79  self.causes = []
80  self.result = Result(self)
81  self.returnedCode = 0
82  self.out = ''
83  self.err = ''
84  self.proc = None
85  self.stack_trace = None
86  self.basedir = os.getcwd()
87 
88  def run(self):
89  logging.debug('running test %s', self.name)
90 
91  if self.options:
92  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
93  'from\s+Configurables\s+import', self.options):
94  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
95  else:
96  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
97  optionFile.file.write(self.options)
98  optionFile.seek(0)
99  self.args.append(RationalizePath(optionFile.name))
100 
101  # If not specified, use the current environment; otherwise merge it with os.environ
102  if self.environment is None : self.environment = os.environ
103  else : self.environment=dict(self.environment.items()+os.environ.items())
104 
105  platform_id = (os.environ.get('BINARY_TAG') or
106  os.environ.get('CMTCONFIG') or
107  platform.platform())
108  # If at least one regex matches we skip the test.
109  skip_test = bool([None
110  for prex in self.unsupported_platforms
111  if re.search(prex, platform_id)])
112 
113  if not skip_test:
114  # handle working/temporary directory options
115  workdir = self.workdir
116  if self.use_temp_dir:
117  if self._common_tmpdir:
118  workdir = self._common_tmpdir
119  else:
120  workdir = tempfile.mkdtemp()
121 
122  # prepare the command to execute
123  prog=''
124  if self.program != '':
125  prog = self.program
126  elif "GAUDIEXE" in os.environ :
127  prog = os.environ["GAUDIEXE"]
128  else :
129  prog = "Gaudi.exe"
130 
131  dummy, prog_ext = os.path.splitext(prog)
132  if prog_ext not in [ ".exe", ".py", ".bat" ]:
133  prog += ".exe"
134  prog_ext = ".exe"
135 
136  prog = which(prog) or prog
137 
138  args = map(RationalizePath, self.args)
139 
140  if prog_ext == ".py" :
141  params = ['python', RationalizePath(prog)] + args
142  else :
143  params = [RationalizePath(prog)] + args
144 
145  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
146  'RESOURCE': None, 'TARGET': None,
147  'TRACEBACK': None, 'START_TIME': None,
148  'END_TIME': None, 'TIMEOUT_DETAIL': None})
149  self.result = validatorRes
150 
151  # we need to switch directory because the validator expects to run
152  # in the same dir as the program
153  os.chdir(workdir)
154 
155  # launch the test in a separate thread to be able to handle a timeout
156  def target() :
157  logging.debug('executing %r in %s',
158  params, workdir)
159  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
160  env=self.environment)
161  logging.debug('(pid: %d)', self.proc.pid)
162  self.out, self.err = self.proc.communicate()
163 
164  thread = threading.Thread(target=target)
165  thread.start()
166  # catching timeout
167  thread.join(self.timeout)
168 
169  if thread.is_alive():
170  logging.debug('time out in test %s (pid %d)', self.name, self.proc.pid)
171  # get the stack trace of the stuck process
172  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
173  '--eval-command=thread apply all backtrace']
174  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
175  self.stack_trace = gdb.communicate()[0]
176 
177  kill_tree(self.proc.pid, signal.SIGTERM)
178  thread.join(60)
179  if thread.is_alive():
180  kill_tree(self.proc.pid, signal.SIGKILL)
181  self.causes.append('timeout')
182  else:
183  logging.debug('completed test %s', self.name)
184 
185  #Getting the error code
186  logging.debug('returnedCode = %s', self.proc.returncode)
187  self.returnedCode = self.proc.returncode
188 
189  logging.debug('validating test...')
190  self.result, self.causes = self.ValidateOutput(stdout=self.out,
191  stderr=self.err,
192  result=validatorRes)
193 
194  # remove the temporary directory if we created it
195  if self.use_temp_dir and not self._common_tmpdir:
196  shutil.rmtree(workdir, True)
197 
198  os.chdir(self.basedir)
199 
200  # handle application exit code
201  if self.signal is not None:
202  if int(self.returnedCode) != -int(self.signal):
203  self.causes.append('exit code')
204 
205  elif self.exit_code is not None:
206  if int(self.returnedCode) != int(self.exit_code):
207  self.causes.append('exit code')
208 
209  elif self.returnedCode != 0:
210  self.causes.append("exit code")
211 
212  if self.causes:
213  self.status = "failed"
214  else:
215  self.status = "passed"
216 
217  else:
218  self.status = "skipped"
219 
220  logging.debug('%s: %s', self.name, self.status)
221  field_mapping = {'Exit Code': 'returnedCode',
222  'stderr': 'err',
223  'Arguments': 'args',
224  'Environment': 'environment',
225  'Status': 'status',
226  'stdout': 'out',
227  'Program Name': 'program',
228  'Name': 'name',
229  'Validator': 'validator',
230  'Output Reference File': 'reference',
231  'Error Reference File': 'error_reference',
232  'Causes': 'causes',
233  #'Validator Result': 'result.annotations',
234  'Unsupported Platforms': 'unsupported_platforms',
235  'Stack Trace': 'stack_trace'}
236  resultDict = [(key, getattr(self, attr))
237  for key, attr in field_mapping.iteritems()
238  if getattr(self, attr)]
239  resultDict.append(('Working Directory',
240  RationalizePath(os.path.join(os.getcwd(),
241  self.workdir))))
242  #print dict(resultDict).keys()
243  resultDict.extend(self.result.annotations.iteritems())
244  #print self.result.annotations.keys()
245  return dict(resultDict)
246 
247 
248  #-------------------------------------------------#
249  #----------------Validating tool------------------#
250  #-------------------------------------------------#
251 
252  def ValidateOutput(self, stdout, stderr, result):
253  if not self.stderr:
254  self.validateWithReference(stdout, stderr, result, self.causes)
255  elif stderr.strip() != self.stderr.strip():
256  self.causes.append('standard error')
257  return result, self.causes
258 
259  def findReferenceBlock(self,reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id = None):
260  """
261  Given a block of text, try to find it in the output. The block has to be identified by a signature line. By default the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first one of the block at which the signature must appear. The parameter 'id' allows different calls to this function in the same validation code to be distinguished.
262  """
263 
264  if reference is None : reference=self.reference
265  if stdout is None : stdout=self.out
266  if result is None : result=self.result
267  if causes is None : causes=self.causes
268 
269  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
270  if not reflines:
271  raise RuntimeError("Empty (or null) reference")
272  # the same on standard output
273  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
274 
275  res_field = "GaudiTest.RefBlock"
276  if id:
277  res_field += "_%s" % id
278 
279  if signature is None:
280  if signature_offset < 0:
281  signature_offset = len(reflines) + signature_offset
282  signature = reflines[signature_offset]
283  # find the reference block in the output file
284  try:
285  pos = outlines.index(signature)
286  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
287  if reflines != outlines:
288  msg = "standard output"
289  # I do not want 2 messages in causes if the function is called twice
290  if not msg in causes:
291  causes.append(msg)
292  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
293  except ValueError:
294  causes.append("missing signature")
295  result[res_field + ".signature"] = result.Quote(signature)
296  if len(reflines) > 1 or signature != reflines[0]:
297  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
298  return causes
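# Illustrative usage (minimal sketch): from a validation snippet this method can be
# used to require that a known line (or block of consecutive lines) appears in the
# captured output, e.g.
#
#   self.findReferenceBlock('ApplicationMgr       INFO Application Manager Terminated successfully')
#
# The signature line is located in the output and the remaining lines of the block,
# if any, must match the surrounding non-empty output lines.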
299 
300  def countErrorLines(self, expected = {'ERROR':0, 'FATAL':0}, stdout=None, result=None,causes=None):
301  """
302  Count the number of messages with required severity (by default ERROR and FATAL)
303  and check if their numbers match the expected ones (0 by default).
304  The dictionary "expected" can be used to tune the number of errors and fatals
305  allowed, or to limit the number of expected warnings etc.
306  """
307 
308  if stdout is None : stdout=self.out
309  if result is None : result=self.result
310  if causes is None : causes=self.causes
311 
312  # prepare the dictionary to record the extracted lines
313  errors = {}
314  for sev in expected:
315  errors[sev] = []
316 
317  outlines = stdout.splitlines()
318  from math import log10
319  fmt = "%%%dd - %%s" % (int(log10(len(outlines)+1)))
320 
321  linecount = 0
322  for l in outlines:
323  linecount += 1
324  words = l.split()
325  if len(words) >= 2 and words[1] in errors:
326  errors[words[1]].append(fmt%(linecount,l.rstrip()))
327 
328  for e in errors:
329  if len(errors[e]) != expected[e]:
330  causes.append('%s(%d)'%(e,len(errors[e])))
331  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
332  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
333 
334  return causes
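# Illustrative usage (minimal sketch): a test that tolerates exactly two ERROR lines
# and no FATAL lines could be validated with, e.g.
#
#   self.countErrorLines(expected={'ERROR': 2, 'FATAL': 0})
#
# Any mismatch appends a cause such as 'ERROR(3)' and records the offending lines
# in the result object.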
335 
336  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
337  trees_dict = None,
338  ignore = r"Basket|.*size|Compression"):
339  """
340  Compare the TTree summaries in stdout with the ones in trees_dict or in
341  the reference file. By default ignore the size, compression and basket
342  fields.
343  The presence of TTree summaries when none is expected is not a failure.
344  """
345  if stdout is None : stdout=self.out
346  if result is None : result=self.result
347  if causes is None : causes=self.causes
348  if trees_dict is None:
349  lreference = self._expandReferenceFileName(self.reference)
350  # call the validator if the file exists
351  if lreference and os.path.isfile(lreference):
352  trees_dict = findTTreeSummaries(open(lreference).read())
353  else:
354  trees_dict = {}
355 
356  from pprint import PrettyPrinter
357  pp = PrettyPrinter()
358  if trees_dict:
359  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
360  if ignore:
361  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
362 
363  trees = findTTreeSummaries(stdout)
364  failed = cmpTreesDicts(trees_dict, trees, ignore)
365  if failed:
366  causes.append("trees summaries")
367  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
368  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
369  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
370 
371  return causes
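# Illustrative usage (minimal sketch): the expected summaries can also be supplied
# directly instead of being extracted from the reference file, e.g.
#
#   self.CheckTTreesSummaries(trees_dict={'MyTree': {'Entries': 100}})
#
# Keys matching the 'ignore' regular expression (sizes, compression and baskets by
# default) are not compared.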
372 
373  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
374  dict = None,
375  ignore = None):
376  """
377  Compare the histogram summaries in stdout with the ones in 'dict' or in
378  the reference file. By default nothing is ignored (see the 'ignore'
379  parameter).
380  The presence of histogram summaries when none is expected is not a failure.
381  """
382  if stdout is None : stdout=self.out
383  if result is None : result=self.result
384  if causes is None : causes=self.causes
385 
386  if dict is None:
387  lreference = self._expandReferenceFileName(self.reference)
388  # call the validator if the file exists
389  if lreference and os.path.isfile(lreference):
390  dict = findHistosSummaries(open(lreference).read())
391  else:
392  dict = {}
393 
394  from pprint import PrettyPrinter
395  pp = PrettyPrinter()
396  if dict:
397  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
398  if ignore:
399  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
400 
401  histos = findHistosSummaries(stdout)
402  failed = cmpTreesDicts(dict, histos, ignore)
403  if failed:
404  causes.append("histos summaries")
405  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
406  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
407  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
408 
409  return causes
410 
411  def validateWithReference(self, stdout=None, stderr=None, result=None,
412  causes=None, preproc=None):
413  '''
414  Default validation action: compare standard output and error to the
415  reference files.
416  '''
417 
418  if stdout is None : stdout = self.out
419  if stderr is None : stderr = self.err
420  if result is None : result = self.result
421  if causes is None : causes = self.causes
422 
423  # set the default output preprocessor
424  if preproc is None:
425  preproc = normalizeExamples
426  # check standard output
427  lreference = self._expandReferenceFileName(self.reference)
428  # call the validator if the file exists
429  if lreference and os.path.isfile(lreference):
430  causes += ReferenceFileValidator(lreference,
431  "standard output",
432  "Output Diff",
433  preproc=preproc)(stdout, result)
434  # Compare TTree summaries
435  causes = self.CheckTTreesSummaries(stdout, result, causes)
436  causes = self.CheckHistosSummaries(stdout, result, causes)
437  if causes: # Write a new reference file for stdout
438  try:
439  newref = open(lreference + ".new","w")
440  # sanitize newlines
441  for l in stdout.splitlines():
442  newref.write(l.rstrip() + '\n')
443  del newref # flush and close
444  except IOError:
445  # Ignore IO errors when trying to update reference files
446  # because we may be in a read-only filesystem
447  pass
448 
449  # check standard error
450  lreference = self._expandReferenceFileName(self.error_reference)
451  # call the validator if we have a file to use
452  if lreference and os.path.isfile(lreference):
453  newcauses = ReferenceFileValidator(lreference,
454  "standard error",
455  "Error Diff",
456  preproc=preproc)(stderr, result)
457  causes += newcauses
458  if newcauses: # Write a new reference file for stderr
459  newref = open(lreference + ".new","w")
460  # sanitize newlines
461  for l in stderr.splitlines():
462  newref.write(l.rstrip() + '\n')
463  del newref # flush and close
464  else:
465  causes += BasicOutputValidator(lreference, "standard error", "ExecTest.expected_stderr")(stderr, result)
466  return causes
467 
468  def _expandReferenceFileName(self, reffile):
469  # if no file is passed, do nothing
470  if not reffile:
471  return ""
472 
473  # function to split an extension in constituent parts
474  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
475 
476  reference = os.path.normpath(os.path.join(self.basedir,
477  os.path.expandvars(reffile)))
478 
479  # old-style platform-specific reference name
480  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
481  if os.path.isfile(spec_ref):
482  reference = spec_ref
483  else: # look for new-style platform specific reference files:
484  # get all the files whose names start with the reference filename
485  dirname, basename = os.path.split(reference)
486  if not dirname: dirname = '.'
487  head = basename + "."
488  head_len = len(head)
489  platform = platformSplit(GetPlatform(self))
490  if 'do0' in platform:
491  platform.add('dbg')
492  candidates = []
493  for f in os.listdir(dirname):
494  if f.startswith(head):
495  req_plat = platformSplit(f[head_len:])
496  if platform.issuperset(req_plat):
497  candidates.append( (len(req_plat), f) )
498  if candidates: # take the one with highest matching
499  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
500  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
501  candidates.sort()
502  reference = os.path.join(dirname, candidates[-1][1])
503  return reference
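# Naming convention (illustrative sketch): for a reference file 'ref.out', platform
# specific variants can be placed next to it, e.g.
#
#   ref.out                   default reference
#   ref.out.x86_64-gcc62      used only if every tag in the suffix is contained in
#                             the current BINARY_TAG/CMTCONFIG
#
# Among the matching candidates, the one sharing the largest number of tags with the
# current platform is selected.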
504 
505 #---------------------------------------------------------------------------------------------------#
506 #---------------------------------------------------------------------------------------------------#
507 #-----------------------------------------GAUDI TOOLS-----------------------------------------------------#
508 #---------------------------------------------------------------------------------------------------#
509 #---------------------------------------------------------------------------------------------------#
510 
511 import shutil
512 import string
513 import difflib
514 import calendar
515 
516 try:
517  from GaudiKernel import ROOT6WorkAroundEnabled
518 except ImportError:
519  def ROOT6WorkAroundEnabled(id=None):
520  # dummy implementation
521  return False
522 
523 #--------------------------------- TOOLS ---------------------------------#
524 
525 def RationalizePath(p):
526  """
527  Function used to normalize a path
528  """
529  newPath = os.path.normpath(os.path.expandvars(p))
530  if os.path.exists(newPath) :
531  p = os.path.realpath(newPath)
532  return p
533 
534 
535 def which(executable):
536  """
537  Locates an executable in the path ($PATH) and returns the full
538  path to it. An application is looked for with or without the '.exe' suffix.
539  If the executable cannot be found, None is returned.
540  """
541  if os.path.isabs(executable):
542  if not os.path.exists(executable):
543  if executable.endswith('.exe'):
544  if os.path.exists(executable[:-4]):
545  return executable[:-4]
546  else :
547  head,executable = os.path.split(executable)
548  else :
549  return executable
550  for d in os.environ.get("PATH").split(os.pathsep):
551  fullpath = os.path.join(d, executable)
552  if os.path.exists(fullpath):
553  return fullpath
554  if executable.endswith('.exe'):
555  return which(executable[:-4])
556  return None
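# Illustrative usage (minimal sketch): resolve the program name before launching it,
# as done in BaseTest.run() above, e.g.
#
#   prog = which('Gaudi.exe') or 'Gaudi.exe'
#
# On platforms where the '.exe' suffix is not used, the suffix-less file is returned.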
557 
558 
559 
560 #-------------------------------------------------------------------------#
561 #----------------------------- Result Class ------------------------------#
562 #-------------------------------------------------------------------------#
563 import types
564 
565 class Result:
566 
567  PASS='PASS'
568  FAIL='FAIL'
569  ERROR='ERROR'
570  UNTESTED='UNTESTED'
571 
572  EXCEPTION = ""
573  RESOURCE = ""
574  TARGET = ""
575  TRACEBACK = ""
576  START_TIME = ""
577  END_TIME = ""
578  TIMEOUT_DETAIL = ""
579 
580  def __init__(self,kind=None,id=None,outcome=PASS,annotations={}):
581  self.annotations = annotations.copy()
582 
583  def __getitem__(self,key):
584  assert type(key) in types.StringTypes
585  return self.annotations[key]
586 
587  def __setitem__(self,key,value):
588  assert type(key) in types.StringTypes
589  assert type(value) in types.StringTypes
590  self.annotations[key]=value
591 
592  def Quote(self,string):
593  return string
594 
595 
596 #-------------------------------------------------------------------------#
597 #--------------------------- Validator Classes ---------------------------#
598 #-------------------------------------------------------------------------#
599 
600 # Basic implementation of an output validator for Gaudi tests. This implementation is based on the standard (LCG) validation functions used in QMTest.
601 
602 
603 class BasicOutputValidator:
604 
605  def __init__(self,ref,cause,result_key):
606  self.ref=ref
607  self.cause=cause
608  self.result_key=result_key
609 
610  def __call__(self,out,result):
611  """Validate the output of the program.
612  'out' -- A string containing the data written to the stream being
613  validated (the standard output or the standard error of the
614  program under test).
615  'result' -- A 'Result' object. It may be used to annotate
616  the outcome according to the content of the output (e.g. to quote
617  the expected reference text).
618  returns -- A list of strings giving causes of failure."""
619 
620  causes=[]
621  #Check the output
622  if not self.__CompareText(out,self.ref):
623  causes.append(self.cause)
624  result[self.result_key] =result.Quote(self.ref)
625 
626 
627 
628  return causes
629 
630  def __CompareText(self, s1, s2):
631  """Compare 's1' and 's2', ignoring line endings.
632  's1' -- A string.
633  's2' -- A string.
634  returns -- True if 's1' and 's2' are the same, ignoring
635  differences in line endings."""
636  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
637  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
638  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
639  keep_line = lambda l: not to_ignore.match(l)
640  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
641  else:
642  return s1.splitlines() == s2.splitlines()
643 
644 
645 
646 #------------------------ Preprocessor elements ------------------------#
647 class FilePreprocessor:
648  """ Base class for a callable that takes a file and returns a modified
649  version of it."""
650  def __processLine__(self, line):
651  return line
652  def __processFile__(self, lines):
653  output = []
654  for l in lines:
655  l = self.__processLine__(l)
656  if l: output.append(l)
657  return output
658  def __call__(self, input):
659  if hasattr(input,"__iter__"):
660  lines = input
661  mergeback = False
662  else:
663  lines = input.splitlines()
664  mergeback = True
665  output = self.__processFile__(lines)
666  if mergeback: output = '\n'.join(output)
667  return output
668  def __add__(self, rhs):
669  return FilePreprocessorSequence([self,rhs])
670 
671 class FilePreprocessorSequence(FilePreprocessor):
672  def __init__(self, members = []):
673  self.members = members
674  def __add__(self, rhs):
675  return FilePreprocessorSequence(self.members + [rhs])
676  def __call__(self, input):
677  output = input
678  for pp in self.members:
679  output = pp(output)
680  return output
681 
682 class LineSkipper(FilePreprocessor):
683  def __init__(self, strings = [], regexps = []):
684  import re
685  self.strings = strings
686  self.regexps = map(re.compile,regexps)
687 
688  def __processLine__(self, line):
689  for s in self.strings:
690  if line.find(s) >= 0: return None
691  for r in self.regexps:
692  if r.search(line): return None
693  return line
694 
696  def __init__(self, start, end):
697  self.start = start
698  self.end = end
699  self._skipping = False
700 
701  def __processLine__(self, line):
702  if self.start in line:
703  self._skipping = True
704  return None
705  elif self.end in line:
706  self._skipping = False
707  elif self._skipping:
708  return None
709  return line
710 
711 class RegexpReplacer(FilePreprocessor):
712  def __init__(self, orig, repl = "", when = None):
713  if when:
714  when = re.compile(when)
715  self._operations = [ (when, re.compile(orig), repl) ]
716  def __add__(self,rhs):
717  if isinstance(rhs, RegexpReplacer):
718  res = RegexpReplacer("","",None)
719  res._operations = self._operations + rhs._operations
720  else:
721  res = FilePreprocessor.__add__(self, rhs)
722  return res
723  def __processLine__(self, line):
724  for w,o,r in self._operations:
725  if w is None or w.search(line):
726  line = o.sub(r, line)
727  return line
728 
729 # Common preprocessors
730 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
731 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
732  "00:00:00 1970-01-01")
733 normalizeEOL = FilePreprocessor()
734 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
735 
736 skipEmptyLines = FilePreprocessor()
737 # FIXME: that's ugly
738 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
739 
740 ## Special preprocessor sorting the list of strings (whitespace separated)
741 # that follow a signature on a single line
742 class LineSorter(FilePreprocessor):
743  def __init__(self, signature):
744  self.signature = signature
745  self.siglen = len(signature)
746  def __processLine__(self, line):
747  pos = line.find(self.signature)
748  if pos >=0:
749  lst = line[(pos+self.siglen):].split()
750  lst.sort()
751  line = line[:(pos+self.siglen)]
752  line += " ".join(lst)
753  return line
754 
755 class SortGroupOfLines(FilePreprocessor):
756  '''
757  Sort group of lines matching a regular expression
758  '''
759  def __init__(self, exp):
760  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
761  def __processFile__(self, lines):
762  match = self.exp.match
763  output = []
764  group = []
765  for l in lines:
766  if match(l):
767  group.append(l)
768  else:
769  if group:
770  group.sort()
771  output.extend(group)
772  group = []
773  output.append(l)
774  return output
775 
776 # Preprocessors for GaudiExamples
777 normalizeExamples = maskPointers + normalizeDate
778 for w,o,r in [
779  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
780  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
781  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
782  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
783  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
784  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
785  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
786  # Absorb a change in ServiceLocatorHelper
787  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
788  # Remove the leading 0 in Windows' exponential format
789  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
790  # Output line changed in Gaudi v24
791  (None, r'Service reference count check:', r'Looping over all active services...'),
792  # Ignore count of declared properties (anyway they are all printed)
793  (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+", r"\1NN"),
794  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
795  normalizeExamples += RegexpReplacer(o,r,w)
796 
797 lineSkipper = LineSkipper(["//GP:",
798  "JobOptionsSvc INFO # ",
799  "JobOptionsSvc WARNING # ",
800  "Time User",
801  "Welcome to",
802  "This machine has a speed",
803  "TIME:",
804  "running on",
805  "ToolSvc.Sequenc... INFO",
806  "DataListenerSvc INFO XML written to file:",
807  "[INFO]","[WARNING]",
808  "DEBUG No writable file catalog found which contains FID:",
809  "DEBUG Service base class initialized successfully", # changed between v20 and v21
810  "DEBUG Incident timing:", # introduced with patch #3487
811  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
812  # The signal handler complains about SIGXCPU not defined on some platforms
813  'SIGXCPU',
814  ],regexps = [
815  r"^JobOptionsSvc INFO *$",
816  r"^# ", # Ignore python comments
817  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
818  r"File '.*.xml' does not exist",
819  r"INFO Refer to dataset .* by its file ID:",
820  r"INFO Referring to dataset .* by its file ID:",
821  r"INFO Disconnect from dataset",
822  r"INFO Disconnected from dataset",
823  r"INFO Disconnected data IO:",
824  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
825  # I want to ignore the header of the unchecked StatusCode report
826  r"^StatusCodeSvc.*listing all unchecked return codes:",
827  r"^StatusCodeSvc\s*INFO\s*$",
828  r"Num\s*\|\s*Function\s*\|\s*Source Library",
829  r"^[-+]*\s*$",
830  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
831  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
832  # Hide unchecked StatusCodes from dictionaries
833  r"^ +[0-9]+ \|.*ROOT",
834  r"^ +[0-9]+ \|.*\|.*Dict",
835  # Hide success StatusCodeSvc message
836  r"StatusCodeSvc.*all StatusCode instances where checked",
837  # Hide EventLoopMgr total timing report
838  r"EventLoopMgr.*---> Loop Finished",
839  # Remove ROOT TTree summary table, which changes from one version to the other
840  r"^\*.*\*$",
841  # Remove Histos Summaries
842  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
843  r"^ \|",
844  r"^ ID=",
845  # Ignore added/removed properties
846  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
847  r"Property(.*)'AuditRe(start|initialize)':", # these were missing in tools
848  r"Property(.*)'IsIOBound':",
849  r"Property(.*)'ErrorCount(er)?':", # removed with gaudi/Gaudi!273
850  r"Property(.*)'Sequential':", # added with gaudi/Gaudi!306
851  # ignore uninteresting/obsolete messages
852  r"Property update for OutputLevel : new value =",
853  r"EventLoopMgr\s*DEBUG Creating OutputStream",
854  ] )
855 
856 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
857  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
858  lineSkipper += LineSkipper(regexps = [
859  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
860  ])
861 
862 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
863  normalizeEOL + LineSorter("Services to release : ") +
864  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
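# Illustrative usage (minimal sketch): a preprocessor chain is a callable that
# accepts either a whole string or a list of lines, e.g.
#
#   normalizeExamples('pointer at 0x7fff5fbff8a0')
#
# returns the same text with the address masked as '0x########'; dates, times and
# other volatile details are normalized and uninteresting lines are dropped, so that
# the output can be compared with a stable reference file.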
865 
866 #--------------------- Validation functions/classes ---------------------#
867 
868 class ReferenceFileValidator:
869  def __init__(self,reffile, cause, result_key, preproc=normalizeExamples):
870  self.reffile = os.path.expandvars(reffile)
871  self.cause=cause
872  self.result_key = result_key
873  self.preproc = preproc
874 
875  def __call__(self,stdout, result) :
876  causes = []
877  if os.path.isfile(self.reffile):
878  orig = open(self.reffile).xreadlines()
879  if self.preproc:
880  orig = self.preproc(orig)
881  result[self.result_key + '.preproc.orig'] = \
882  result.Quote('\n'.join(map(str.strip, orig)))
883  else:
884  orig = []
885  new = stdout.splitlines()
886  if self.preproc:
887  new = self.preproc(new)
888 
889  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
890  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
891  if filterdiffs:
892  result[self.result_key] = result.Quote("\n".join(filterdiffs))
893  result[self.result_key] += result.Quote("""
894  Legend:
895  -) reference file
896  +) standard output of the test""")
897  result[self.result_key + '.preproc.new'] = \
898  result.Quote('\n'.join(map(str.strip, new)))
899  causes.append(self.cause)
900  return causes
901 
902 def findTTreeSummaries(stdout):
903  """
904  Scan stdout to find ROOT TTree summaries and digest them.
905  """
906  stars = re.compile(r"^\*+$")
907  outlines = stdout.splitlines()
908  nlines = len(outlines)
909  trees = {}
910 
911  i = 0
912  while i < nlines: #loop over the output
913  # look for the beginning of the next TTree summary table (a line of '*')
914  while i < nlines and not stars.match(outlines[i]):
915  i += 1
916  if i < nlines:
917  tree, i = _parseTTreeSummary(outlines, i)
918  if tree:
919  trees[tree["Name"]] = tree
920 
921  return trees
922 
923 def cmpTreesDicts(reference, to_check, ignore = None):
924  """
925  Check that all the keys in reference are in to_check too, with the same value.
926  If the value is a dict, the function is called recursively. to_check can
927  contain more keys than reference; the extra keys are not tested.
928  The function returns at the first difference found.
929  """
930  fail_keys = []
931  # filter the keys in the reference dictionary
932  if ignore:
933  ignore_re = re.compile(ignore)
934  keys = [ key for key in reference if not ignore_re.match(key) ]
935  else:
936  keys = reference.keys()
937  # loop over the keys (not ignored) in the reference dictionary
938  for k in keys:
939  if k in to_check: # the key must be in the dictionary to_check
940  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
941  # if both reference and to_check values are dictionaries, recurse
942  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
943  else:
944  # compare the two values
945  failed = to_check[k] != reference[k]
946  else: # handle missing keys in the dictionary to check (i.e. failure)
947  to_check[k] = None
948  failed = True
949  if failed:
950  fail_keys.insert(0, k)
951  break # exit from the loop at the first failure
952  return fail_keys # return the list of keys bringing to the different values
953 
954 def getCmpFailingValues(reference, to_check, fail_path):
955  c = to_check
956  r = reference
957  for k in fail_path:
958  c = c.get(k,None)
959  r = r.get(k,None)
960  if c is None or r is None:
961  break # one of the dictionaries is not deep enough
962  return (fail_path, r, c)
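# Illustrative usage (minimal sketch): the two helpers are used together by the
# TTree/histogram checks above, e.g.
#
#   ref   = {'MyTree': {'Entries': 10}}
#   found = {'MyTree': {'Entries': 9}}
#   failed = cmpTreesDicts(ref, found)            # ['MyTree', 'Entries']
#   getCmpFailingValues(ref, found, failed)       # (['MyTree', 'Entries'], 10, 9)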
963 
964 # signature of the print-out of the histograms
965 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
966 
967 
968 def _parseTTreeSummary(lines, pos):
969  """
970  Parse the TTree summary table in lines, starting from pos.
971  Returns a tuple with the dictionary of the digested information and the
972  position of the first line after the summary.
973  """
974  result = {}
975  i = pos + 1 # first line is a sequence of '*'
976  count = len(lines)
977 
978  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
979  def parseblock(ll):
980  r = {}
981  cols = splitcols(ll[0])
982  r["Name"], r["Title"] = cols[1:]
983 
984  cols = splitcols(ll[1])
985  r["Entries"] = int(cols[1])
986 
987  sizes = cols[2].split()
988  r["Total size"] = int(sizes[2])
989  if sizes[-1] == "memory":
990  r["File size"] = 0
991  else:
992  r["File size"] = int(sizes[-1])
993 
994  cols = splitcols(ll[2])
995  sizes = cols[2].split()
996  if cols[0] == "Baskets":
997  r["Baskets"] = int(cols[1])
998  r["Basket size"] = int(sizes[2])
999  r["Compression"] = float(sizes[-1])
1000  return r
1001 
1002  if i < (count - 3) and lines[i].startswith("*Tree"):
1003  result = parseblock(lines[i:i+3])
1004  result["Branches"] = {}
1005  i += 4
1006  while i < (count - 3) and lines[i].startswith("*Br"):
1007  if i < (count - 2) and lines[i].startswith("*Branch "):
1008  # skip branch header
1009  i += 3
1010  continue
1011  branch = parseblock(lines[i:i+3])
1012  result["Branches"][branch["Name"]] = branch
1013  i += 4
1014 
1015  return (result, i)
1016 
1017 def parseHistosSummary(lines, pos):
1018  """
1019  Extract the histogram info from the lines starting at pos.
1020  Returns the summaries and the position of the first line after the summary block.
1021  """
1022  global h_count_re
1023  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1024  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1025 
1026  nlines = len(lines)
1027 
1028  # decode header
1029  m = h_count_re.search(lines[pos])
1030  name = m.group(1).strip()
1031  total = int(m.group(2))
1032  header = {}
1033  for k, v in [ x.split("=") for x in m.group(3).split() ]:
1034  header[k] = int(v)
1035  pos += 1
1036  header["Total"] = total
1037 
1038  summ = {}
1039  while pos < nlines:
1040  m = h_table_head.search(lines[pos])
1041  if m:
1042  t, d = m.groups(1) # type and directory
1043  t = t.replace(" profile", "Prof")
1044  pos += 1
1045  if pos < nlines:
1046  l = lines[pos]
1047  else:
1048  l = ""
1049  cont = {}
1050  if l.startswith(" | ID"):
1051  # table format
1052  titles = [ x.strip() for x in l.split("|")][1:]
1053  pos += 1
1054  while pos < nlines and lines[pos].startswith(" |"):
1055  l = lines[pos]
1056  values = [ x.strip() for x in l.split("|")][1:]
1057  hcont = {}
1058  for i in range(len(titles)):
1059  hcont[titles[i]] = values[i]
1060  cont[hcont["ID"]] = hcont
1061  pos += 1
1062  elif l.startswith(" ID="):
1063  while pos < nlines and lines[pos].startswith(" ID="):
1064  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
1065  cont[values[0]] = values
1066  pos += 1
1067  else: # not interpreted
1068  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
1069  if not d in summ:
1070  summ[d] = {}
1071  summ[d][t] = cont
1072  summ[d]["header"] = header
1073  else:
1074  break
1075  if not summ:
1076  # If the full table is not present, we use only the header
1077  summ[name] = {"header": header}
1078  return summ, pos
1079 
1080 
1081 
1082 def findHistosSummaries(stdout):
1083  """
1084  Scan stdout to find the histogram summaries and digest them.
1085  """
1086  outlines = stdout.splitlines()
1087  nlines = len(outlines) - 1
1088  summaries = {}
1089  global h_count_re
1090 
1091  pos = 0
1092  while pos < nlines:
1093  summ = {}
1094  # find first line of block:
1095  match = h_count_re.search(outlines[pos])
1096  while pos < nlines and not match:
1097  pos += 1
1098  match = h_count_re.search(outlines[pos])
1099  if match:
1100  summ, pos = parseHistosSummary(outlines, pos)
1101  summaries.update(summ)
1102  return summaries
1103 
1104 def PlatformIsNotSupported(self, context, result):
1105  platform = GetPlatform(self)
1106  unsupported = [ re.compile(x) for x in [ str(y).strip() for y in self.unsupported_platforms ] if x]
1107  for p_re in unsupported :
1108  if p_re.search(platform):
1109  result.SetOutcome(result.UNTESTED)
1110  result[result.CAUSE] = 'Platform not supported.'
1111  return True
1112  return False
1113 
1114 def GetPlatform(self):
1115  """
1116  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1117  """
1118  arch = "None"
1119  # check architecture name
1120  if "BINARY_TAG" in os.environ:
1121  arch = os.environ["BINARY_TAG"]
1122  elif "CMTCONFIG" in os.environ:
1123  arch = os.environ["CMTCONFIG"]
1124  elif "SCRAM_ARCH" in os.environ:
1125  arch = os.environ["SCRAM_ARCH"]
1126  return arch
1127 
1128 def isWinPlatform(self):
1129  """
1130  Return True if the current platform is Windows.
1131 
1132  This function was needed because of the change in the CMTCONFIG format,
1133  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1134  """
1135  platform = GetPlatform(self)
1136  return "winxp" in platform or platform.startswith("win")