BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 def sanitize_for_xml(data):
17  '''
18  Take a string with invalid ASCII/UTF characters and quote them so that the
19  string can be used in an XML text.
20 
21  >>> sanitize_for_xml('this is \x1b')
22  'this is [NON-XML-CHAR-0x1B]'
23  '''
24  bad_chars = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
25  def quote(match):
26  'helper function'
27  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
28  return bad_chars.sub(quote, data)
29 
30 def dumpProcs(name):
31  '''helper to debug GAUDI-1084, dump the list of processes'''
32  from getpass import getuser
33  if 'WORKSPACE' in os.environ:
34  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
35  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
36  f.write(p.communicate()[0])
37 
38 def kill_tree(ppid, sig):
39  '''
40  Send a signal to a process and all its child processes (starting from the
41  leaves).
42  '''
43  log = logging.getLogger('kill_tree')
44  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
45  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
46  children = map(int, get_children.communicate()[0].split())
47  for child in children:
48  kill_tree(child, sig)
49  try:
50  log.debug('killing process %d', ppid)
51  os.kill(ppid, sig)
52  except OSError, err:
53  if err.errno != 3: # No such process
54  raise
55  log.debug('no such process %d', ppid)
56 
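A minimal usage sketch (not part of the original file): kill_tree() can be pointed at any process id, for example a child just started with Popen; the 'sleep' command is only an illustration.

import signal
from subprocess import Popen

child = Popen(['sleep', '600'])
kill_tree(child.pid, signal.SIGTERM)  # stops 'sleep' and anything it spawned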
57 #-------------------------------------------------------------------------#
58 class BaseTest(object):
59 
60  _common_tmpdir = None
61 
62  def __init__(self):
63  self.program = ''
64  self.args = []
65  self.reference = ''
66  self.error_reference = ''
67  self.options = ''
68  self.stderr = ''
69  self.timeout = 600
70  self.exit_code = None
71  self.environment = None
72  self.unsupported_platforms = []
73  self.signal = None
74  self.workdir = os.curdir
75  self.use_temp_dir = False
76  #Variables not for users
77  self.status = None
78  self.name = ''
79  self.causes = []
80  self.result = Result(self)
81  self.returnedCode = 0
82  self.out = ''
83  self.err = ''
84  self.proc = None
85  self.stack_trace = None
86  self.basedir = os.getcwd()
87 
88  def validator(self, stdout='',stderr=''):
89  pass
90 
91  def run(self):
92  logging.debug('running test %s', self.name)
93 
94  if self.options:
95  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
96  r'from\s+Configurables\s+import', self.options):
97  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
98  else:
99  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
100  optionFile.file.write(self.options)
101  optionFile.seek(0)
102  self.args.append(RationalizePath(optionFile.name))
103 
104  # if no environment is specified, use the current one
105  if self.environment is None : self.environment = os.environ
106  else : self.environment=dict(self.environment.items()+os.environ.items())
107 
108  platform_id = (os.environ.get('BINARY_TAG') or
109  os.environ.get('CMTCONFIG') or
110  platform.platform())
111  # If at least one regex matches we skip the test.
112  skip_test = bool([None
113  for prex in self.unsupported_platforms
114  if re.search(prex, platform_id)])
115 
116  if not skip_test:
117  # handle working/temporary directory options
118  workdir = self.workdir
119  if self.use_temp_dir:
120  if self._common_tmpdir:
121  workdir = self._common_tmpdir
122  else:
123  workdir = tempfile.mkdtemp()
124 
125  # prepare the command to execute
126  prog=''
127  if self.program != '':
128  prog = self.program
129  elif "GAUDIEXE" in os.environ :
130  prog = os.environ["GAUDIEXE"]
131  else :
132  prog = "Gaudi.exe"
133 
134  dummy, prog_ext = os.path.splitext(prog)
135  if prog_ext not in [ ".exe", ".py", ".bat" ]:
136  prog += ".exe"
137  prog_ext = ".exe"
138 
139  prog = which(prog) or prog
140 
141  args = map(RationalizePath, self.args)
142 
143  if prog_ext == ".py" :
144  params = ['python', RationalizePath(prog)] + args
145  else :
146  params = [RationalizePath(prog)] + args
147 
148  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
149  'RESOURCE': None, 'TARGET': None,
150  'TRACEBACK': None, 'START_TIME': None,
151  'END_TIME': None, 'TIMEOUT_DETAIL': None})
152  self.result = validatorRes
153 
154  # we need to switch directory because the validator expects to run
155  # in the same dir as the program
156  os.chdir(workdir)
157 
158  #launching test in a different thread to handle timeout exception
159  def target() :
160  logging.debug('executing %r in %s',
161  params, workdir)
162  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
163  env=self.environment)
164  logging.debug('(pid: %d)', self.proc.pid)
165  self.out, self.err = self.proc.communicate()
166 
167  thread = threading.Thread(target=target)
168  thread.start()
169  # catching timeout
170  thread.join(self.timeout)
171 
172  if thread.is_alive():
173  logging.debug('time out in test %s (pid %d)', self.name, self.proc.pid)
174  # get the stack trace of the stuck process
175  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
176  '--eval-command=thread apply all backtrace']
177  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
178  self.stack_trace = gdb.communicate()[0]
179 
180  kill_tree(self.proc.pid, signal.SIGTERM)
181  thread.join(60)
182  if thread.is_alive():
183  kill_tree(self.proc.pid, signal.SIGKILL)
184  self.causes.append('timeout')
185  else:
186  logging.debug('completed test %s', self.name)
187 
188  #Getting the error code
189  logging.debug('returnedCode = %s', self.proc.returncode)
190  self.returnedCode = self.proc.returncode
191 
192  logging.debug('validating test...')
193  self.result, self.causes = self.ValidateOutput(stdout=self.out,
194  stderr=self.err,
195  result=validatorRes)
196 
197  # remove the temporary directory if we created it
198  if self.use_temp_dir and not self._common_tmpdir:
199  shutil.rmtree(workdir, True)
200 
201  os.chdir(self.basedir)
202 
203  # handle application exit code
204  if self.signal is not None:
205  if int(self.returnedCode) != -int(self.signal):
206  self.causes.append('exit code')
207 
208  elif self.exit_code is not None:
209  if int(self.returnedCode) != int(self.exit_code):
210  self.causes.append('exit code')
211 
212  elif self.returnedCode != 0:
213  self.causes.append("exit code")
214 
215  if self.causes:
216  self.status = "failed"
217  else:
218  self.status = "passed"
219 
220  else:
221  self.status = "skipped"
222 
223  logging.debug('%s: %s', self.name, self.status)
224  field_mapping = {'Exit Code': 'returnedCode',
225  'stderr': 'err',
226  'Arguments': 'args',
227  'Environment': 'environment',
228  'Status': 'status',
229  'stdout': 'out',
230  'Program Name': 'program',
231  'Name': 'name',
232  'Validator': 'validator',
233  'Output Reference File': 'reference',
234  'Error Reference File': 'error_reference',
235  'Causes': 'causes',
236  #'Validator Result': 'result.annotations',
237  'Unsupported Platforms': 'unsupported_platforms',
238  'Stack Trace': 'stack_trace'}
239  resultDict = [(key, getattr(self, attr))
240  for key, attr in field_mapping.iteritems()
241  if getattr(self, attr)]
242  resultDict.append(('Working Directory',
243  RationalizePath(os.path.join(os.getcwd(),
244  self.workdir))))
245  #print dict(resultDict).keys()
246  resultDict.extend(self.result.annotations.iteritems())
247  #print self.result.annotations.keys()
248  return dict(resultDict)
249 
250 
251  #-------------------------------------------------#
252  #----------------Validating tool------------------#
253  #-------------------------------------------------#
254 
255  def ValidateOutput(self, stdout, stderr, result):
256  # checking if default validation or not
257  if self.validator is not BaseTest.validator:
258  self.validator(stdout, stderr, result, self.causes,
259  self.reference, self.error_reference)
260  else:
261  if self.stderr == '':
262  self.validateWithReference(stdout, stderr, result, self.causes)
263  elif stderr.strip() != self.stderr.strip():
264  self.causes.append('standard error')
265 
266 
267  return result, self.causes
268 
269 
270 
271  def findReferenceBlock(self,reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id = None):
272  """
273  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed via the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows to distinguish between different calls to this function in the same validation code.
274  """
275 
276  if reference is None : reference=self.reference
277  if stdout is None : stdout=self.out
278  if result is None : result=self.result
279  if causes is None : causes=self.causes
280 
281  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
282  if not reflines:
283  raise RuntimeError("Empty (or null) reference")
284  # the same on standard output
285  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
286 
287  res_field = "GaudiTest.RefBlock"
288  if id:
289  res_field += "_%s" % id
290 
291  if signature is None:
292  if signature_offset < 0:
293  signature_offset = len(reflines) + signature_offset
294  signature = reflines[signature_offset]
295  # find the reference block in the output file
296  try:
297  pos = outlines.index(signature)
298  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
299  if reflines != outlines:
300  msg = "standard output"
301  # avoid adding the same message twice if the function is called twice
302  if not msg in causes:
303  causes.append(msg)
304  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
305  except ValueError:
306  causes.append("missing signature")
307  result[res_field + ".signature"] = result.Quote(signature)
308  if len(reflines) > 1 or signature != reflines[0]:
309  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
310  return causes
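A hypothetical call from a custom validator, assuming `test` is a configured BaseTest instance and `captured_stdout` holds the program output; the block text is invented.

expected_block = '\n'.join([
    'ApplicationMgr       INFO Application Manager Initialized successfully',
    'ApplicationMgr       INFO Application Manager Started successfully',
])
test.findReferenceBlock(reference=expected_block, stdout=captured_stdout,
                        id='appmgr_banner')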
311 
312  def countErrorLines(self, expected = {'ERROR':0, 'FATAL':0}, stdout=None, result=None,causes=None):
313  """
314  Count the number of messages with required severity (by default ERROR and FATAL)
315  and check if their numbers match the expected ones (0 by default).
316  The dictionary "expected" can be used to tune the number of errors and fatals
317  allowed, or to limit the number of expected warnings etc.
318  """
319 
320  if stdout is None : stdout=self.out
321  if result is None : result=self.result
322  if causes is None : causes=self.causes
323 
324  # prepare the dictionary to record the extracted lines
325  errors = {}
326  for sev in expected:
327  errors[sev] = []
328 
329  outlines = stdout.splitlines()
330  from math import log10
331  fmt = "%%%dd - %%s" % (int(log10(len(outlines)+1)))
332 
333  linecount = 0
334  for l in outlines:
335  linecount += 1
336  words = l.split()
337  if len(words) >= 2 and words[1] in errors:
338  errors[words[1]].append(fmt%(linecount,l.rstrip()))
339 
340  for e in errors:
341  if len(errors[e]) != expected[e]:
342  causes.append('%s(%d)'%(e,len(errors[e])))
343  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
344  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
345 
346  return causes
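For example, a validator that tolerates exactly two ERROR messages and no FATAL messages could call it like this (`test` and `captured_stdout` are assumed to exist):

test.countErrorLines(expected={'ERROR': 2, 'FATAL': 0}, stdout=captured_stdout)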
347 
348  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
349  trees_dict = None,
350  ignore = r"Basket|.*size|Compression"):
351  """
352  Compare the TTree summaries in stdout with the ones in trees_dict or in
353  the reference file. By default ignore the size, compression and basket
354  fields.
355  The presence of TTree summaries when none is expected is not a failure.
356  """
357  if stdout is None : stdout=self.out
358  if result is None : result=self.result
359  if causes is None : causes=self.causes
360  if trees_dict is None:
361  lreference = self._expandReferenceFileName(self.reference)
362  # call the validator if the file exists
363  if lreference and os.path.isfile(lreference):
364  trees_dict = findTTreeSummaries(open(lreference).read())
365  else:
366  trees_dict = {}
367 
368  from pprint import PrettyPrinter
369  pp = PrettyPrinter()
370  if trees_dict:
371  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
372  if ignore:
373  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
374 
375  trees = findTTreeSummaries(stdout)
376  failed = cmpTreesDicts(trees_dict, trees, ignore)
377  if failed:
378  causes.append("trees summaries")
379  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
380  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
381  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
382 
383  return causes
384 
385  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
386  dict = None,
387  ignore = None):
388  """
389  Compare the histogram summaries in stdout with the ones in dict or in
390  the reference file. By default no field is ignored.
391  The presence of histogram summaries when none is expected is not a
392  failure.
393  """
394  if stdout is None : stdout=self.out
395  if result is None : result=self.result
396  if causes is None : causes=self.causes
397 
398  if dict is None:
399  lreference = self._expandReferenceFileName(self.reference)
400  # call the validator if the file exists
401  if lreference and os.path.isfile(lreference):
402  dict = findHistosSummaries(open(lreference).read())
403  else:
404  dict = {}
405 
406  from pprint import PrettyPrinter
407  pp = PrettyPrinter()
408  if dict:
409  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
410  if ignore:
411  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
412 
413  histos = findHistosSummaries(stdout)
414  failed = cmpTreesDicts(dict, histos, ignore)
415  if failed:
416  causes.append("histos summaries")
417  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
418  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
419  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
420 
421  return causes
422 
423  def validateWithReference(self, stdout=None, stderr=None, result=None,
424  causes=None, preproc=None):
425  '''
426  Default validation action: compare standard output and error to the
427  reference files.
428  '''
429 
430  if stdout is None : stdout = self.out
431  if stderr is None : stderr = self.err
432  if result is None : result = self.result
433  if causes is None : causes = self.causes
434 
435  # set the default output preprocessor
436  if preproc is None:
437  preproc = normalizeExamples
438  # check standard output
439  lreference = self._expandReferenceFileName(self.reference)
440  # call the validator if the file exists
441  if lreference and os.path.isfile(lreference):
442  causes += ReferenceFileValidator(lreference,
443  "standard output",
444  "Output Diff",
445  preproc=preproc)(stdout, result)
446  # Compare TTree summaries
447  causes = self.CheckTTreesSummaries(stdout, result, causes)
448  causes = self.CheckHistosSummaries(stdout, result, causes)
449  if causes: # Write a new reference file for stdout
450  try:
451  newref = open(lreference + ".new","w")
452  # sanitize newlines
453  for l in stdout.splitlines():
454  newref.write(l.rstrip() + '\n')
455  del newref # flush and close
456  except IOError:
457  # Ignore IO errors when trying to update reference files
458  # because we may be in a read-only filesystem
459  pass
460 
461  # check standard error
462  lreference = self._expandReferenceFileName(self.error_reference)
463  # call the validator if we have a file to use
464  if lreference and os.path.isfile(lreference):
465  newcauses = ReferenceFileValidator(lreference,
466  "standard error",
467  "Error Diff",
468  preproc=preproc)(stderr, result)
469  causes += newcauses
470  if newcauses: # Write a new reference file for stderr
471  newref = open(lreference + ".new","w")
472  # sanitize newlines
473  for l in stderr.splitlines():
474  newref.write(l.rstrip() + '\n')
475  del newref # flush and close
476  else:
477  causes += BasicOutputValidator(lreference, "standard error", "ExecTest.expected_stderr")(stderr, result)
478  return causes
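A sketch of the default reference-based validation, assuming `test` is a BaseTest instance and `captured_stdout`/`captured_stderr` hold the program output (the reference path is hypothetical):

test.reference = 'refs/mytest.ref'              # hypothetical reference file
failure_causes = test.validateWithReference(stdout=captured_stdout,
                                            stderr=captured_stderr)
if failure_causes:
    print('validation failed: %s' % ', '.join(failure_causes))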
479 
480  def _expandReferenceFileName(self, reffile):
481  # if no file is passed, do nothing
482  if not reffile:
483  return ""
484 
485  # function to split an extension in constituents parts
486  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
487 
488  reference = os.path.normpath(os.path.join(self.basedir,
489  os.path.expandvars(reffile)))
490 
491  # old-style platform-specific reference name
492  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
493  if os.path.isfile(spec_ref):
494  reference = spec_ref
495  else: # look for new-style platform specific reference files:
496  # get all the files whose name start with the reference filename
497  dirname, basename = os.path.split(reference)
498  if not dirname: dirname = '.'
499  head = basename + "."
500  head_len = len(head)
501  platform = platformSplit(GetPlatform(self))
502  if 'do0' in platform:
503  platform.add('dbg')
504  candidates = []
505  for f in os.listdir(dirname):
506  if f.startswith(head):
507  req_plat = platformSplit(f[head_len:])
508  if platform.issuperset(req_plat):
509  candidates.append( (len(req_plat), f) )
510  if candidates: # take the one with highest matching
511  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
512  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
513  candidates.sort()
514  reference = os.path.join(dirname, candidates[-1][1])
515  return reference
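To illustrate the platform-specific lookup above (file names and platform tag are hypothetical):

# self.reference = 'refs/mytest.ref'
# BINARY_TAG     = 'x86_64-slc6-gcc49-dbg'
# If 'refs/mytest.ref.x86_64-dbg' exists it is preferred over the plain
# 'refs/mytest.ref', because its suffix tokens {'x86_64', 'dbg'} are a subset
# of the platform tokens {'x86_64', 'slc6', 'gcc49', 'dbg'}; among several
# candidates the one matching the most tokens wins.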
516 
517 #---------------------------------------------------------------------------------------------------#
518 #---------------------------------------------------------------------------------------------------#
519 #-----------------------------------------GAUDI TOOLS-----------------------------------------------------#
520 #---------------------------------------------------------------------------------------------------#
521 #---------------------------------------------------------------------------------------------------#
522 
523 import shutil
524 import string
525 import difflib
526 import calendar
527 
528 try:
529  from GaudiKernel import ROOT6WorkAroundEnabled
530 except ImportError:
531  def ROOT6WorkAroundEnabled(id=None):
532  # dummy implementation
533  return False
534 
535 #--------------------------------- TOOLS ---------------------------------#
536 
537 def RationalizePath(p):
538  """
539  Function used to normalize a path
540  """
541  newPath = os.path.normpath(os.path.expandvars(p))
542  if os.path.exists(newPath) :
543  p = os.path.realpath(newPath)
544  return p
545 
546 
547 def which(executable):
548  """
549  Locates an executable in the executables path ($PATH) and returns the full
550  path to it. An application is looked for with or without the '.exe' suffix.
551  If the executable cannot be found, None is returned
552  """
553  if os.path.isabs(executable):
554  if not os.path.exists(executable):
555  if executable.endswith('.exe'):
556  if os.path.exists(executable[:-4]):
557  return executable[:-4]
558  else :
559  head,executable = os.path.split(executable)
560  else :
561  return executable
562  for d in os.environ.get("PATH").split(os.pathsep):
563  fullpath = os.path.join(d, executable)
564  if os.path.exists(fullpath):
565  return fullpath
566  if executable.endswith('.exe'):
567  return which(executable[:-4])
568  return None
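For example (the executable name is arbitrary):

print(which('gdb'))        # e.g. '/usr/bin/gdb', or None if not on the PATH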
569 
570 
571 
572 #-------------------------------------------------------------------------#
573 #----------------------------- Result Classe -----------------------------#
574 #-------------------------------------------------------------------------#
575 import types
576 
577 class Result:
578 
579  PASS='PASS'
580  FAIL='FAIL'
581  ERROR='ERROR'
582  UNTESTED='UNTESTED'
583 
584  EXCEPTION = ""
585  RESOURCE = ""
586  TARGET = ""
587  TRACEBACK = ""
588  START_TIME = ""
589  END_TIME = ""
590  TIMEOUT_DETAIL = ""
591 
592  def __init__(self,kind=None,id=None,outcome=PASS,annotations={}):
593  self.annotations = annotations.copy()
594 
595  def __getitem__(self,key):
596  assert type(key) in types.StringTypes
597  return self.annotations[key]
598 
599  def __setitem__(self,key,value):
600  assert type(key) in types.StringTypes
601  assert type(value) in types.StringTypes
602  self.annotations[key]=value
603 
604  def Quote(self,string):
605  return string
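A small sketch of how Result is used as a plain annotation container by the validators below (key and value are invented):

res = Result()
res['GaudiTest.my_check.expected'] = res.Quote('42 events processed')
print(res['GaudiTest.my_check.expected'])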
606 
607 
608 #-------------------------------------------------------------------------#
609 #--------------------------- Validator Classes ---------------------------#
610 #-------------------------------------------------------------------------#
611 
612 # Basic implementation of an output validator for Gaudi tests. This implementation is based on the standard (LCG) validation functions used in QMTest.
613 
614 
615 class BasicOutputValidator:
616 
617  def __init__(self,ref,cause,result_key):
618  self.ref=ref
619  self.cause=cause
620  self.result_key=result_key
621 
622  def __call__(self,out,result):
623  """Validate the output of the program.
624  'stdout' -- A string containing the data written to the standard output
625  stream.
626  'stderr' -- A string containing the data written to the standard error
627  stream.
628  'result' -- A 'Result' object. It may be used to annotate
629  the outcome according to the content of stderr.
630  returns -- A list of strings giving causes of failure."""
631 
632  causes=[]
633  #Check the output
634  if not self.__CompareText(out,self.ref):
635  causes.append(self.cause)
636  result[self.result_key] =result.Quote(self.ref)
637 
638 
639 
640  return causes
641 
642  def __CompareText(self, s1, s2):
643  """Compare 's1' and 's2', ignoring line endings.
644  's1' -- A string.
645  's2' -- A string.
646  returns -- True if 's1' and 's2' are the same, ignoring
647  differences in line endings."""
648  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
649  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
650  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
651  keep_line = lambda l: not to_ignore.match(l)
652  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
653  else:
654  return s1.splitlines() == s2.splitlines()
655 
656 
657 
658 #------------------------ Preprocessor elements ------------------------#
659 class FilePreprocessor:
660  """ Base class for a callable that takes a file and returns a modified
661  version of it."""
662  def __processLine__(self, line):
663  return line
664  def __call__(self, input):
665  if hasattr(input,"__iter__"):
666  lines = input
667  mergeback = False
668  else:
669  lines = input.splitlines()
670  mergeback = True
671  output = []
672  for l in lines:
673  l = self.__processLine__(l)
674  if l: output.append(l)
675  if mergeback: output = '\n'.join(output)
676  return output
677  def __add__(self, rhs):
678  return FilePreprocessorSequence([self,rhs])
679 
680 class FilePreprocessorSequence(FilePreprocessor):
681  def __init__(self, members = []):
682  self.members = members
683  def __add__(self, rhs):
684  return FilePreprocessorSequence(self.members + [rhs])
685  def __call__(self, input):
686  output = input
687  for pp in self.members:
688  output = pp(output)
689  return output
690 
691 class LineSkipper(FilePreprocessor):
692  def __init__(self, strings = [], regexps = []):
693  import re
694  self.strings = strings
695  self.regexps = map(re.compile,regexps)
696 
697  def __processLine__(self, line):
698  for s in self.strings:
699  if line.find(s) >= 0: return None
700  for r in self.regexps:
701  if r.search(line): return None
702  return line
703 
704 class BlockSkipper(FilePreprocessor):
705  def __init__(self, start, end):
706  self.start = start
707  self.end = end
708  self._skipping = False
709 
710  def __processLine__(self, line):
711  if self.start in line:
712  self._skipping = True
713  return None
714  elif self.end in line:
715  self._skipping = False
716  elif self._skipping:
717  return None
718  return line
719 
720 class RegexpReplacer(FilePreprocessor):
721  def __init__(self, orig, repl = "", when = None):
722  if when:
723  when = re.compile(when)
724  self._operations = [ (when, re.compile(orig), repl) ]
725  def __add__(self,rhs):
726  if isinstance(rhs, RegexpReplacer):
727  res = RegexpReplacer("","",None)
728  res._operations = self._operations + rhs._operations
729  else:
730  res = FilePreprocessor.__add__(self, rhs)
731  return res
732  def __processLine__(self, line):
733  for w,o,r in self._operations:
734  if w is None or w.search(line):
735  line = o.sub(r, line)
736  return line
737 
738 # Common preprocessors
739 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
740 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
741  "00:00:00 1970-01-01")
742 normalizeEOL = FilePreprocessor()
743 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
744 
745 skipEmptyLines = FilePreprocessor()
746 # FIXME: that's ugly
747 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
748 
749 ## Special preprocessor sorting the list of strings (whitespace separated)
750 # that follow a signature on a single line
751 class LineSorter(FilePreprocessor):
752  def __init__(self, signature):
753  self.signature = signature
754  self.siglen = len(signature)
755  def __processLine__(self, line):
756  pos = line.find(self.signature)
757  if pos >=0:
758  head = line[:(pos+self.siglen)]
759  lst = line[(pos+self.siglen):].split()
760  lst.sort()
761  line = head + " ".join(lst)
762  return line
763 
764 # Preprocessors for GaudiExamples
765 normalizeExamples = maskPointers + normalizeDate
766 for w,o,r in [
767  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
768  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
769  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
770  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
771  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
772  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
773  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
774  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
775  # Absorb a change in ServiceLocatorHelper
776  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
777  # Remove the leading 0 in Windows' exponential format
778  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
779  # Output line changed in Gaudi v24
780  (None, r'Service reference count check:', r'Looping over all active services...'),
781  # Change of property name in Algorithm (GAUDI-1030)
782  (None, r"Property(.*)'ErrorCount':", r"Property\1'ErrorCounter':"),
783  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
784  normalizeExamples += RegexpReplacer(o,r,w)
785 
786 lineSkipper = LineSkipper(["//GP:",
787  "JobOptionsSvc INFO # ",
788  "JobOptionsSvc WARNING # ",
789  "Time User",
790  "Welcome to",
791  "This machine has a speed",
792  "TIME:",
793  "running on",
794  "ToolSvc.Sequenc... INFO",
795  "DataListenerSvc INFO XML written to file:",
796  "[INFO]","[WARNING]",
797  "DEBUG No writable file catalog found which contains FID:",
798  "0 local", # hack for ErrorLogExample
799  "DEBUG Service base class initialized successfully", # changed between v20 and v21
800  "DEBUG Incident timing:", # introduced with patch #3487
801  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
802  # The signal handler complains about SIGXCPU not defined on some platforms
803  'SIGXCPU',
804  ],regexps = [
805  r"^JobOptionsSvc INFO *$",
806  r"^#", # Ignore python comments
807  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
808  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
809  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
810  r"File '.*.xml' does not exist",
811  r"INFO Refer to dataset .* by its file ID:",
812  r"INFO Referring to dataset .* by its file ID:",
813  r"INFO Disconnect from dataset",
814  r"INFO Disconnected from dataset",
815  r"INFO Disconnected data IO:",
816  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
817  # I want to ignore the header of the unchecked StatusCode report
818  r"^StatusCodeSvc.*listing all unchecked return codes:",
819  r"^StatusCodeSvc\s*INFO\s*$",
820  r"Num\s*\|\s*Function\s*\|\s*Source Library",
821  r"^[-+]*\s*$",
822  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
823  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
824  # Hide unchecked StatusCodes from dictionaries
825  r"^ +[0-9]+ \|.*ROOT",
826  r"^ +[0-9]+ \|.*\|.*Dict",
827  # Hide success StatusCodeSvc message
828  r"StatusCodeSvc.*all StatusCode instances where checked",
829  # Hide EventLoopMgr total timing report
830  r"EventLoopMgr.*---> Loop Finished",
831  # Remove ROOT TTree summary table, which changes from one version to the other
832  r"^\*.*\*$",
833  # Remove Histos Summaries
834  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
835  r"^ \|",
836  r"^ ID=",
837  ] )
838 
839 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
840  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
841  lineSkipper += LineSkipper(regexps = [
842  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
843  ])
844 
845 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
846  normalizeEOL + LineSorter("Services to release : "))
847 
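A minimal sketch of the combined preprocessor in action (the log line is invented): pointer values are masked before the output is compared to the reference.

print(normalizeExamples('ToolSvc              INFO created tool at 0x7f3a12bc5d40'))
# prints something like: ToolSvc              INFO created tool at 0x########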
848 
849 #--------------------- Validation functions/classes ---------------------#
850 
851 class ReferenceFileValidator:
852  def __init__(self,reffile, cause, result_key, preproc=normalizeExamples):
853  self.reffile = os.path.expandvars(reffile)
854  self.cause=cause
855  self.result_key = result_key
856  self.preproc = preproc
857 
858  def __call__(self,stdout, result) :
859  causes=[]
860  if os.path.isfile(self.reffile):
861  orig=open(self.reffile).xreadlines()
862  if self.preproc:
863  orig = self.preproc(orig)
864  else:
865  orig = []
866  new = stdout.splitlines()
867  if self.preproc:
868  new = self.preproc(new)
869 
870  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
871  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
872  if filterdiffs:
873  result[self.result_key] = result.Quote("\n".join(filterdiffs))
874  result[self.result_key] += result.Quote("""
875  Legend:
876  -) reference file
877  +) standard output of the test""")
878  causes.append(self.cause)
879  return causes
880 
881 def findTTreeSummaries(stdout):
882  """
883  Scan stdout to find ROOT TTree summaries and digest them.
884  """
885  stars = re.compile(r"^\*+$")
886  outlines = stdout.splitlines()
887  nlines = len(outlines)
888  trees = {}
889 
890  i = 0
891  while i < nlines: #loop over the output
892  # look for the next line made only of '*' (start of a summary block)
893  while i < nlines and not stars.match(outlines[i]):
894  i += 1
895  if i < nlines:
896  tree, i = _parseTTreeSummary(outlines, i)
897  if tree:
898  trees[tree["Name"]] = tree
899 
900  return trees
901 
902 def cmpTreesDicts(reference, to_check, ignore = None):
903  """
904  Check that all the keys in reference are in to_check too, with the same value.
905  If the value is a dict, the function is called recursively. to_check can
906  contain more keys than reference; the extra keys are not tested.
907  The function returns at the first difference found.
908  """
909  fail_keys = []
910  # filter the keys in the reference dictionary
911  if ignore:
912  ignore_re = re.compile(ignore)
913  keys = [ key for key in reference if not ignore_re.match(key) ]
914  else:
915  keys = reference.keys()
916  # loop over the keys (not ignored) in the reference dictionary
917  for k in keys:
918  if k in to_check: # the key must be in the dictionary to_check
919  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
920  # if both reference and to_check values are dictionaries, recurse
921  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
922  else:
923  # compare the two values
924  failed = to_check[k] != reference[k]
925  else: # handle missing keys in the dictionary to check (i.e. failure)
926  to_check[k] = None
927  failed = True
928  if failed:
929  fail_keys.insert(0, k)
930  break # exit from the loop at the first failure
931  return fail_keys # return the list of keys bringing to the different values
932 
933 def getCmpFailingValues(reference, to_check, fail_path):
934  c = to_check
935  r = reference
936  for k in fail_path:
937  c = c.get(k,None)
938  r = r.get(k,None)
939  if c is None or r is None:
940  break # one of the dictionaries is not deep enough
941  return (fail_path, r, c)
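A hypothetical example of the two helpers above: the 'Entries' value of branch 'B1' differs, so the failing key path and the two differing values are reported.

ref   = {'MyTree': {'Entries': 100, 'Branches': {'B1': {'Entries': 100}}}}
found = {'MyTree': {'Entries': 100, 'Branches': {'B1': {'Entries': 99}}}}
path = cmpTreesDicts(ref, found)
print(path)                                   # ['MyTree', 'Branches', 'B1', 'Entries']
print(getCmpFailingValues(ref, found, path))  # (path, 100, 99)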
942 
943 # signature of the print-out of the histograms
944 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
945 
946 
947 def _parseTTreeSummary(lines, pos):
948  """
949  Parse the TTree summary table in lines, starting from pos.
950  Returns a tuple with the dictionary with the digested informations and the
951  position of the first line after the summary.
952  """
953  result = {}
954  i = pos + 1 # first line is a sequence of '*'
955  count = len(lines)
956 
957  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
958  def parseblock(ll):
959  r = {}
960  cols = splitcols(ll[0])
961  r["Name"], r["Title"] = cols[1:]
962 
963  cols = splitcols(ll[1])
964  r["Entries"] = int(cols[1])
965 
966  sizes = cols[2].split()
967  r["Total size"] = int(sizes[2])
968  if sizes[-1] == "memory":
969  r["File size"] = 0
970  else:
971  r["File size"] = int(sizes[-1])
972 
973  cols = splitcols(ll[2])
974  sizes = cols[2].split()
975  if cols[0] == "Baskets":
976  r["Baskets"] = int(cols[1])
977  r["Basket size"] = int(sizes[2])
978  r["Compression"] = float(sizes[-1])
979  return r
980 
981  if i < (count - 3) and lines[i].startswith("*Tree"):
982  result = parseblock(lines[i:i+3])
983  result["Branches"] = {}
984  i += 4
985  while i < (count - 3) and lines[i].startswith("*Br"):
986  if i < (count - 2) and lines[i].startswith("*Branch "):
987  # skip branch header
988  i += 3
989  continue
990  branch = parseblock(lines[i:i+3])
991  result["Branches"][branch["Name"]] = branch
992  i += 4
993 
994  return (result, i)
995 
996 def parseHistosSummary(lines, pos):
997  """
998  Extract the histograms infos from the lines starting at pos.
999  Returns the position of the first line after the summary block.
1000  """
1001  global h_count_re
1002  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1003  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1004 
1005  nlines = len(lines)
1006 
1007  # decode header
1008  m = h_count_re.search(lines[pos])
1009  name = m.group(1).strip()
1010  total = int(m.group(2))
1011  header = {}
1012  for k, v in [ x.split("=") for x in m.group(3).split() ]:
1013  header[k] = int(v)
1014  pos += 1
1015  header["Total"] = total
1016 
1017  summ = {}
1018  while pos < nlines:
1019  m = h_table_head.search(lines[pos])
1020  if m:
1021  t, d = m.groups(1) # type and directory
1022  t = t.replace(" profile", "Prof")
1023  pos += 1
1024  if pos < nlines:
1025  l = lines[pos]
1026  else:
1027  l = ""
1028  cont = {}
1029  if l.startswith(" | ID"):
1030  # table format
1031  titles = [ x.strip() for x in l.split("|")][1:]
1032  pos += 1
1033  while pos < nlines and lines[pos].startswith(" |"):
1034  l = lines[pos]
1035  values = [ x.strip() for x in l.split("|")][1:]
1036  hcont = {}
1037  for i in range(len(titles)):
1038  hcont[titles[i]] = values[i]
1039  cont[hcont["ID"]] = hcont
1040  pos += 1
1041  elif l.startswith(" ID="):
1042  while pos < nlines and lines[pos].startswith(" ID="):
1043  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
1044  cont[values[0]] = values
1045  pos += 1
1046  else: # not interpreted
1047  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
1048  if not d in summ:
1049  summ[d] = {}
1050  summ[d][t] = cont
1051  summ[d]["header"] = header
1052  else:
1053  break
1054  if not summ:
1055  # If the full table is not present, we use only the header
1056  summ[name] = {"header": header}
1057  return summ, pos
1058 
1059 
1060 
1061 def findHistosSummaries(stdout):
1062  """
1063  Scan stdout to find the histogram summaries and digest them.
1064  """
1065  outlines = stdout.splitlines()
1066  nlines = len(outlines) - 1
1067  summaries = {}
1068  global h_count_re
1069 
1070  pos = 0
1071  while pos < nlines:
1072  summ = {}
1073  # find first line of block:
1074  match = h_count_re.search(outlines[pos])
1075  while pos < nlines and not match:
1076  pos += 1
1077  match = h_count_re.search(outlines[pos])
1078  if match:
1079  summ, pos = parseHistosSummary(outlines, pos)
1080  summaries.update(summ)
1081  return summaries
1082 
1083 def PlatformIsNotSupported(self, context, result):
1084  platform = GetPlatform(self)
1085  unsupported = [ re.compile(x) for x in [ str(y).strip() for y in self.unsupported_platforms ] if x]
1086  for p_re in unsupported :
1087  if p_re.search(platform):
1088  result.SetOutcome(result.UNTESTED)
1089  result[result.CAUSE] = 'Platform not supported.'
1090  return True
1091  return False
1092 
1093 def GetPlatform(self):
1094  """
1095  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1096  """
1097  arch = "None"
1098  # check architecture name
1099  if "BINARY_TAG" in os.environ:
1100  arch = os.environ["BINARY_TAG"]
1101  elif "CMTCONFIG" in os.environ:
1102  arch = os.environ["CMTCONFIG"]
1103  elif "SCRAM_ARCH" in os.environ:
1104  arch = os.environ["SCRAM_ARCH"]
1105  return arch
1106 
1107 def isWinPlatform(self):
1108  """
1109  Return True if the current platform is Windows.
1110 
1111  This function was needed because of the change in the CMTCONFIG format,
1112  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1113  """
1114  platform = GetPlatform(self)
1115  return "winxp" in platform or platform.startswith("win")
1116 