BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 def sanitize_for_xml(data):
17  '''
18  Take a string with invalid ASCII/UTF characters and quote them so that the
19  string can be used in an XML text.
20 
21  >>> sanitize_for_xml('this is \x1b')
22  'this is [NON-XML-CHAR-0x1B]'
23  '''
24  bad_chars = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
25  def quote(match):
26  'helper function'
27  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
28  return bad_chars.sub(quote, data)
29 
30 def dumpProcs(name):
31  '''helper to debug GAUDI-1084, dump the list of processes'''
32  from getpass import getuser
33  if 'WORKSPACE' in os.environ:
34  p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
35  with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
36  f.write(p.communicate()[0])
37 
38 def kill_tree(ppid, sig):
39  '''
40  Send a signal to a process and all its child processes (starting from the
41  leaves).
42  '''
43  log = logging.getLogger('kill_tree')
44  ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
45  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
46  children = map(int, get_children.communicate()[0].split())
47  for child in children:
48  kill_tree(child, sig)
49  try:
50  log.debug('killing process %d', ppid)
51  os.kill(ppid, sig)
52  except OSError, err:
53  if err.errno != 3: # No such process
54  raise
55  log.debug('no such process %d', ppid)
56 
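# Illustrative usage (the child process is hypothetical, not part of this
# module): ask a whole process tree to terminate, then force-kill survivors.
#
#   child = Popen(['sleep', '300'])
#   kill_tree(child.pid, signal.SIGTERM)
#   kill_tree(child.pid, signal.SIGKILL)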
57 #-------------------------------------------------------------------------#
58 class BaseTest(object):
59 
60  _common_tmpdir = None
61 
62  def __init__(self):
63  self.program = ''
64  self.args = []
65  self.reference = ''
66  self.error_reference = ''
67  self.options = ''
68  self.stderr = ''
69  self.timeout = 600
70  self.exit_code = None
71  self.environment = None
72  self.unsupported_platforms = []
73  self.signal = None
74  self.workdir = os.curdir
75  self.use_temp_dir = False
76  #Variables not for users
77  self.status = None
78  self.name = ''
79  self.causes = []
80  self.result = Result(self)
81  self.returnedCode = 0
82  self.out = ''
83  self.err = ''
84  self.proc = None
85  self.stack_trace = None
86  self.basedir = os.getcwd()
87 
88  def validator(self, stdout='',stderr=''):
89  pass
90 
91  def run(self):
92  logging.debug('running test %s', self.name)
93 
94  if self.options:
95  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
96  'from\s+Configurables\s+import', self.options):
97  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
98  else:
99  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
100  optionFile.file.write(self.options)
101  optionFile.seek(0)
102  self.args.append(RationalizePath(optionFile.name))
103 
104  # if the environment is not specified use the current one, otherwise merge it with the current one
105  if self.environment is None : self.environment = os.environ
106  else : self.environment=dict(self.environment.items()+os.environ.items())
107 
108  platform_id = (os.environ.get('BINARY_TAG') or
109  os.environ.get('CMTCONFIG') or
110  platform.platform())
111  # If at least one regex matches we skip the test.
112  skip_test = bool([None
113  for prex in self.unsupported_platforms
114  if re.search(prex, platform_id)])
115 
116  if not skip_test:
117  # handle working/temporary directory options
118  workdir = self.workdir
119  if self.use_temp_dir:
120  if self._common_tmpdir:
121  workdir = self._common_tmpdir
122  else:
123  workdir = tempfile.mkdtemp()
124 
125  # prepare the command to execute
126  prog=''
127  if self.program != '':
128  prog = self.program
129  elif "GAUDIEXE" in os.environ :
130  prog = os.environ["GAUDIEXE"]
131  else :
132  prog = "Gaudi.exe"
133 
134  dummy, prog_ext = os.path.splitext(prog)
135  if prog_ext not in [ ".exe", ".py", ".bat" ]:
136  prog += ".exe"
137  prog_ext = ".exe"
138 
139  prog = which(prog) or prog
140 
141  args = map(RationalizePath, self.args)
142 
143  if prog_ext == ".py" :
144  params = ['python', RationalizePath(prog)] + args
145  else :
146  params = [RationalizePath(prog)] + args
147 
148  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
149  'RESOURCE': None, 'TARGET': None,
150  'TRACEBACK': None, 'START_TIME': None,
151  'END_TIME': None, 'TIMEOUT_DETAIL': None})
152  self.result = validatorRes
153 
154  # we need to switch directory because the validator expects to run
155  # in the same dir as the program
156  os.chdir(workdir)
157 
158  # launch the test in a separate thread so that a timeout can be enforced
159  def target() :
160  logging.debug('executing %r in %s',
161  params, workdir)
162  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
163  env=self.environment)
164  logging.debug('(pid: %d)', self.proc.pid)
165  self.out, self.err = self.proc.communicate()
166 
167  thread = threading.Thread(target=target)
168  thread.start()
169  # catching timeout
170  thread.join(self.timeout)
171 
172  if thread.is_alive():
173  logging.debug('time out in test %s (pid %d)', self.name, self.proc.pid)
174  # get the stack trace of the stuck process
175  cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
176  '--eval-command=thread apply all backtrace']
177  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
178  self.stack_trace = gdb.communicate()[0]
179 
180  kill_tree(self.proc.pid, signal.SIGTERM)
181  thread.join(60)
182  if thread.is_alive():
183  kill_tree(self.proc.pid, signal.SIGKILL)
184  self.causes.append('timeout')
185  else:
186  logging.debug('completed test %s', self.name)
187 
188  #Getting the error code
189  logging.debug('returnedCode = %s', self.proc.returncode)
190  self.returnedCode = self.proc.returncode
191 
192  logging.debug('validating test...')
193  self.result, self.causes = self.ValidateOutput(stdout=self.out,
194  stderr=self.err,
195  result=validatorRes)
196 
197  # remove the temporary directory if we created it
198  if self.use_temp_dir and not self._common_tmpdir:
199  shutil.rmtree(workdir, True)
200 
201  os.chdir(self.basedir)
202 
203  # handle application exit code
204  if self.signal is not None:
205  if int(self.returnedCode) != -int(self.signal):
206  self.causes.append('exit code')
207 
208  elif self.exit_code is not None:
209  if int(self.returnedCode) != int(self.exit_code):
210  self.causes.append('exit code')
211 
212  elif self.returnedCode != 0:
213  self.causes.append("exit code")
214 
215  if self.causes:
216  self.status = "failed"
217  else:
218  self.status = "passed"
219 
220  else:
221  self.status = "skipped"
222 
223  logging.debug('%s: %s', self.name, self.status)
224  field_mapping = {'Exit Code': 'returnedCode',
225  'stderr': 'err',
226  'Arguments': 'args',
227  'Environment': 'environment',
228  'Status': 'status',
229  'stdout': 'out',
230  'Program Name': 'program',
231  'Name': 'name',
232  'Validator': 'validator',
233  'Output Reference File': 'reference',
234  'Error Reference File': 'error_reference',
235  'Causes': 'causes',
236  #'Validator Result': 'result.annotations',
237  'Unsupported Platforms': 'unsupported_platforms',
238  'Stack Trace': 'stack_trace'}
239  resultDict = [(key, getattr(self, attr))
240  for key, attr in field_mapping.iteritems()
241  if getattr(self, attr)]
242  resultDict.append(('Working Directory',
243  RationalizePath(os.path.join(os.getcwd(),
244  self.workdir))))
245  #print dict(resultDict).keys()
246  resultDict.extend(self.result.annotations.iteritems())
247  #print self.result.annotations.keys()
248  return dict(resultDict)
249 
250 
251  #-------------------------------------------------#
252  #----------------Validating tool------------------#
253  #-------------------------------------------------#
254 
255  def ValidateOutput(self, stdout, stderr, result):
256  # check whether the default validator has been overridden
257  if self.validator is not BaseTest.validator:
258  self.validator(stdout, stderr, result, self.causes,
259  self.reference, self.error_reference)
260  else:
261  if self.stderr == '':
262  self.validateWithReference(stdout, stderr, result, self.causes)
263  elif stderr.strip() != self.stderr.strip():
264  self.causes.append('standard error')
265 
266 
267  return result, self.causes
268 
269 
270 
271  def findReferenceBlock(self,reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id = None):
272  """
273  Given a block of text, try to find it in the output. The block has to be identified by a signature line. By default the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed explicitly via the 'signature' argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows one to distinguish between different calls to this function in the same validation code (an illustrative call is sketched in the comment after this method).
274  """
275 
276  if reference is None : reference=self.reference
277  if stdout is None : stdout=self.out
278  if result is None : result=self.result
279  if causes is None : causes=self.causes
280 
281  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
282  if not reflines:
283  raise RuntimeError("Empty (or null) reference")
284  # the same on standard output
285  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
286 
287  res_field = "GaudiTest.RefBlock"
288  if id:
289  res_field += "_%s" % id
290 
291  if signature is None:
292  if signature_offset < 0:
293  signature_offset = len(reference)+signature_offset
294  signature = reflines[signature_offset]
295  # find the reference block in the output file
296  try:
297  pos = outlines.index(signature)
298  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
299  if reflines != outlines:
300  msg = "standard output"
301  # I do not want 2 messages in causes if the function is called twice
302  if not msg in causes:
303  causes.append(msg)
304  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
305  except ValueError:
306  causes.append("missing signature")
307  result[res_field + ".signature"] = result.Quote(signature)
308  if len(reflines) > 1 or signature != reflines[0]:
309  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
310  return causes
311 
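# Illustrative call (the reference text is hypothetical):
#
#   self.findReferenceBlock('''
#       ApplicationMgr INFO Application Manager Initialized successfully
#       ApplicationMgr INFO Application Manager Started successfully
#       ''', id='startup')
#
# Unless 'signature' or 'signature_offset' is given, the first non-empty line
# of the block is used as the signature to locate it in the output.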
312  def countErrorLines(self, expected = {'ERROR':0, 'FATAL':0}, stdout=None, result=None,causes=None):
313  """
314  Count the number of messages with required severity (by default ERROR and FATAL)
315  and check if their numbers match the expected ones (0 by default).
316  The dictionary "expected" can be used to tune the number of errors and fatals
317  allowed, or to limit the number of expected warnings etc.
318  """
319 
320  if stdout is None : stdout=self.out
321  if result is None : result=self.result
322  if causes is None : causes=self.causes
323 
324  # prepare the dictionary to record the extracted lines
325  errors = {}
326  for sev in expected:
327  errors[sev] = []
328 
329  outlines = stdout.splitlines()
330  from math import log10
331  fmt = "%%%dd - %%s" % (int(log10(len(outlines)+1)))
332 
333  linecount = 0
334  for l in outlines:
335  linecount += 1
336  words = l.split()
337  if len(words) >= 2 and words[1] in errors:
338  errors[words[1]].append(fmt%(linecount,l.rstrip()))
339 
340  for e in errors:
341  if len(errors[e]) != expected[e]:
342  causes.append('%s(%d)'%(e,len(errors[e])))
343  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
344  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
345 
346  return causes
347 
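# Illustrative call (the counts are hypothetical): tolerate exactly two ERROR
# lines and no FATAL lines in the captured stdout:
#
#   self.countErrorLines(expected={'ERROR': 2, 'FATAL': 0})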
348  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
349  trees_dict = None,
350  ignore = r"Basket|.*size|Compression"):
351  """
352  Compare the TTree summaries in stdout with the ones in trees_dict or in
353  the reference file. By default ignore the size, compression and basket
354  fields.
355  The presence of TTree summaries when none is expected is not a failure.
356  """
357  if stdout is None : stdout=self.out
358  if result is None : result=self.result
359  if causes is None : causes=self.causes
360  if trees_dict is None:
361  lreference = self._expandReferenceFileName(self.reference)
362  # call the validator if the file exists
363  if lreference and os.path.isfile(lreference):
364  trees_dict = findTTreeSummaries(open(lreference).read())
365  else:
366  trees_dict = {}
367 
368  from pprint import PrettyPrinter
369  pp = PrettyPrinter()
370  if trees_dict:
371  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
372  if ignore:
373  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
374 
375  trees = findTTreeSummaries(stdout)
376  failed = cmpTreesDicts(trees_dict, trees, ignore)
377  if failed:
378  causes.append("trees summaries")
379  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
380  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
381  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
382 
383  return causes
384 
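# Illustrative call (tree name and entry count are hypothetical): require a
# 'Hits' TTree with 100 entries, ignoring size/compression fields as per the
# default 'ignore' pattern:
#
#   self.CheckTTreesSummaries(trees_dict={'Hits': {'Entries': 100}})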
385  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
386  dict = None,
387  ignore = None):
388  """
389  Compare the histogram summaries in stdout with the ones in 'dict' or in
390  the reference file. By default nothing is ignored (a regexp can be
391  passed via 'ignore').
392  The presence of histogram summaries when none is expected is not a failure.
393  """
394  if stdout is None : stdout=self.out
395  if result is None : result=self.result
396  if causes is None : causes=self.causes
397 
398  if dict is None:
399  lreference = self._expandReferenceFileName(self.reference)
400  # call the validator if the file exists
401  if lreference and os.path.isfile(lreference):
402  dict = findHistosSummaries(open(lreference).read())
403  else:
404  dict = {}
405 
406  from pprint import PrettyPrinter
407  pp = PrettyPrinter()
408  if dict:
409  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
410  if ignore:
411  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
412 
413  histos = findHistosSummaries(stdout)
414  failed = cmpTreesDicts(dict, histos, ignore)
415  if failed:
416  causes.append("histos summaries")
417  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
418  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
419  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
420 
421  return causes
422 
423  def validateWithReference(self, stdout=None, stderr=None, result=None,
424  causes=None, preproc=None):
425  '''
426  Default validation action: compare standard output and error to the
427  reference files.
428  '''
429 
430  if stdout is None : stdout = self.out
431  if stderr is None : stderr = self.err
432  if result is None : result = self.result
433  if causes is None : causes = self.causes
434 
435  # set the default output preprocessor
436  if preproc is None:
437  preproc = normalizeExamples
438  # check standard output
439  lreference = self._expandReferenceFileName(self.reference)
440  # call the validator if the file exists
441  if lreference and os.path.isfile(lreference):
442  causes += ReferenceFileValidator(lreference,
443  "standard output",
444  "Output Diff",
445  preproc=preproc)(stdout, result)
446  # Compare TTree summaries
447  causes = self.CheckTTreesSummaries(stdout, result, causes)
448  causes = self.CheckHistosSummaries(stdout, result, causes)
449  if causes: # Write a new reference file for stdout
450  try:
451  newref = open(lreference + ".new","w")
452  # sanitize newlines
453  for l in stdout.splitlines():
454  newref.write(l.rstrip() + '\n')
455  del newref # flush and close
456  except IOError:
457  # Ignore IO errors when trying to update reference files
458  # because we may be in a read-only filesystem
459  pass
460 
461  # check standard error
462  lreference = self._expandReferenceFileName(self.error_reference)
463  # call the validator if we have a file to use
464  if lreference and os.path.isfile(lreference):
465  newcauses = ReferenceFileValidator(lreference,
466  "standard error",
467  "Error Diff",
468  preproc=preproc)(stderr, result)
469  causes += newcauses
470  if newcauses: # Write a new reference file for stderr
471  newref = open(lreference + ".new","w")
472  # sanitize newlines
473  for l in stderr.splitlines():
474  newref.write(l.rstrip() + '\n')
475  del newref # flush and close
476  else:
477  causes += BasicOutputValidator(lreference, "standard error", "ExecTest.expected_stderr")(stderr, result)
478  return causes
479 
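# A minimal sketch of a custom validator hook built on the helpers above (the
# method body is illustrative; ValidateOutput calls an overridden 'validator'
# with exactly these arguments):
#
#   def validator(self, stdout, stderr, result, causes, reference, error_reference):
#       self.validateWithReference(stdout, stderr, result, causes)
#       self.countErrorLines()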
480  def _expandReferenceFileName(self, reffile):
481  # if no file is passed, do nothing
482  if not reffile:
483  return ""
484 
485  # function to split an extension in constituents parts
486  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
487 
488  reference = os.path.normpath(os.path.join(self.basedir,
489  os.path.expandvars(reffile)))
490 
491  # old-style platform-specific reference name
492  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
493  if os.path.isfile(spec_ref):
494  reference = spec_ref
495  else: # look for new-style platform specific reference files:
496  # get all the files whose name start with the reference filename
497  dirname, basename = os.path.split(reference)
498  if not dirname: dirname = '.'
499  head = basename + "."
500  head_len = len(head)
501  platform = platformSplit(GetPlatform(self))
502  if 'do0' in platform:
503  platform.add('dbg')
504  candidates = []
505  for f in os.listdir(dirname):
506  if f.startswith(head):
507  req_plat = platformSplit(f[head_len:])
508  if platform.issuperset(req_plat):
509  candidates.append( (len(req_plat), f) )
510  if candidates: # take the one with highest matching
511  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
512  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
513  candidates.sort()
514  reference = os.path.join(dirname, candidates[-1][1])
515  return reference
516 
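# Minimal usage sketch (the attribute values below are hypothetical, not part
# of this module):
#
#   t = BaseTest()
#   t.name = 'simple_job'
#   t.program = 'gaudirun.py'
#   t.args = ['options/job.py']
#   t.reference = 'refs/simple_job.ref'
#   summary = t.run()            # returns a dict of result fields
#   print(summary['Status'])     # 'passed', 'failed' or 'skipped'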
517 #---------------------------------------------------------------------------------------------------#
518 #---------------------------------------------------------------------------------------------------#
519 #-----------------------------------------GAUDI TOOLS-----------------------------------------------------#
520 #---------------------------------------------------------------------------------------------------#
521 #---------------------------------------------------------------------------------------------------#
522 
523 import shutil
524 import string
525 import difflib
526 import calendar
527 
528 try:
529  from GaudiKernel import ROOT6WorkAroundEnabled
530 except ImportError:
531  def ROOT6WorkAroundEnabled(id=None):
532  # dummy implementation
533  return False
534 
535 #--------------------------------- TOOLS ---------------------------------#
536 
537 def RationalizePath(p):
538  """
539  Function used to normalize the used path
540  """
541  newPath = os.path.normpath(os.path.expandvars(p))
542  if os.path.exists(newPath) :
543  p = os.path.realpath(newPath)
544  return p
545 
546 
547 def which(executable):
548  """
549  Locates an executable in the executables path ($PATH) and returns the full
550  path to it. An application is looked for with or without the '.exe' suffix.
551  If the executable cannot be found, None is returned
552  """
553  if os.path.isabs(executable):
554  if not os.path.exists(executable):
555  if executable.endswith('.exe'):
556  if os.path.exists(executable[:-4]):
557  return executable[:-4]
558  else :
559  head,executable = os.path.split(executable)
560  else :
561  return executable
562  for d in os.environ.get("PATH").split(os.pathsep):
563  fullpath = os.path.join(d, executable)
564  if os.path.exists(fullpath):
565  return fullpath
566  if executable.endswith('.exe'):
567  return which(executable[:-4])
568  return None
569 
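# Illustrative usage (paths are hypothetical):
#
#   which('python')      # -> e.g. '/usr/bin/python' when found on $PATH
#   which('Gaudi.exe')   # also retries as 'Gaudi' if the '.exe' form is absent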
570 
571 
572 #-------------------------------------------------------------------------#
573 #----------------------------- Result Class ------------------------------#
574 #-------------------------------------------------------------------------#
575 import types
576 
577 class Result:
578 
579  PASS='PASS'
580  FAIL='FAIL'
581  ERROR='ERROR'
582  UNTESTED='UNTESTED'
583 
584  EXCEPTION = ""
585  RESOURCE = ""
586  TARGET = ""
587  TRACEBACK = ""
588  START_TIME = ""
589  END_TIME = ""
590  TIMEOUT_DETAIL = ""
591 
592  def __init__(self,kind=None,id=None,outcome=PASS,annotations={}):
593  self.annotations = annotations.copy()
594 
595  def __getitem__(self,key):
596  assert type(key) in types.StringTypes
597  return self.annotations[key]
598 
599  def __setitem__(self,key,value):
600  assert type(key) in types.StringTypes
601  assert type(value) in types.StringTypes
602  self.annotations[key]=value
603 
604  def Quote(self,string):
605  return string
606 
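# Illustrative usage (the annotation key is hypothetical):
#
#   res = Result()
#   res['GaudiTest.note'] = res.Quote('some captured text')
#   res['GaudiTest.note']    # -> 'some captured text'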
607 
608 #-------------------------------------------------------------------------#
609 #--------------------------- Validator Classes ---------------------------#
610 #-------------------------------------------------------------------------#
611 
612 # Basic implementation of an output validator for Gaudi tests. This implementation is based on the standard (LCG) validation functions used in QMTest.
613 
614 
615 class BasicOutputValidator:
616 
617  def __init__(self,ref,cause,result_key):
618  self.ref=ref
619  self.cause=cause
620  self.result_key=result_key
621 
622  def __call__(self,out,result):
623  """Validate the output of the program.
624  'out' -- A string containing the data written to the output stream
625  (standard output or standard error) of the program under test,
626  to be compared against the reference.
627  'result' -- A 'Result' object. It may be used to annotate
628  the outcome according to the content of the checked stream.
629  
630  returns -- A list of strings giving causes of failure."""
631 
632  causes=[]
633  #Check the output
634  if not self.__CompareText(out,self.ref):
635  causes.append(self.cause)
636  result[self.result_key] =result.Quote(self.ref)
637 
638 
639 
640  return causes
641 
642  def __CompareText(self, s1, s2):
643  """Compare 's1' and 's2', ignoring line endings.
644  's1' -- A string.
645  's2' -- A string.
646  returns -- True if 's1' and 's2' are the same, ignoring
647  differences in line endings."""
648  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
649  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
650  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
651  keep_line = lambda l: not to_ignore.match(l)
652  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
653  else:
654  return s1.splitlines() == s2.splitlines()
655 
656 
657 
658 #------------------------ Preprocessor elements ------------------------#
659 class FilePreprocessor:
660  """ Base class for a callable that takes a file and returns a modified
661  version of it."""
662  def __processLine__(self, line):
663  return line
664  def __processFile__(self, lines):
665  output = []
666  for l in lines:
667  l = self.__processLine__(l)
668  if l: output.append(l)
669  return output
670  def __call__(self, input):
671  if hasattr(input,"__iter__"):
672  lines = input
673  mergeback = False
674  else:
675  lines = input.splitlines()
676  mergeback = True
677  output = self.__processFile__(lines)
678  if mergeback: output = '\n'.join(output)
679  return output
680  def __add__(self, rhs):
681  return FilePreprocessorSequence([self,rhs])
682 
683 class FilePreprocessorSequence(FilePreprocessor):
684  def __init__(self, members = []):
685  self.members = members
686  def __add__(self, rhs):
687  return FilePreprocessorSequence(self.members + [rhs])
688  def __call__(self, input):
689  output = input
690  for pp in self.members:
691  output = pp(output)
692  return output
693 
694 class LineSkipper(FilePreprocessor):
695  def __init__(self, strings = [], regexps = []):
696  import re
697  self.strings = strings
698  self.regexps = map(re.compile,regexps)
699 
700  def __processLine__(self, line):
701  for s in self.strings:
702  if line.find(s) >= 0: return None
703  for r in self.regexps:
704  if r.search(line): return None
705  return line
706 
707 class BlockSkipper(FilePreprocessor):
708  def __init__(self, start, end):
709  self.start = start
710  self.end = end
711  self._skipping = False
712 
713  def __processLine__(self, line):
714  if self.start in line:
715  self._skipping = True
716  return None
717  elif self.end in line:
718  self._skipping = False
719  elif self._skipping:
720  return None
721  return line
722 
723 class RegexpReplacer(FilePreprocessor):
724  def __init__(self, orig, repl = "", when = None):
725  if when:
726  when = re.compile(when)
727  self._operations = [ (when, re.compile(orig), repl) ]
728  def __add__(self,rhs):
729  if isinstance(rhs, RegexpReplacer):
730  res = RegexpReplacer("","",None)
731  res._operations = self._operations + rhs._operations
732  else:
733  res = FilePreprocessor.__add__(self, rhs)
734  return res
735  def __processLine__(self, line):
736  for w,o,r in self._operations:
737  if w is None or w.search(line):
738  line = o.sub(r, line)
739  return line
740 
741 # Common preprocessors
742 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
743 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
744  "00:00:00 1970-01-01")
745 normalizeEOL = FilePreprocessor()
746 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
747 
748 skipEmptyLines = FilePreprocessor()
749 # FIXME: that's ugly
750 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
751 
752 ## Special preprocessor sorting the list of strings (whitespace separated)
753 # that follow a signature on a single line
754 class LineSorter(FilePreprocessor):
755  def __init__(self, signature):
756  self.signature = signature
757  self.siglen = len(signature)
758  def __processLine__(self, line):
759  pos = line.find(self.signature)
760  if pos >=0:
761  lst = line[(pos+self.siglen):].split()
762  lst.sort()
763  # keep the signature prefix and append the sorted remainder
764  line = line[:(pos+self.siglen)] + " ".join(lst)
765  return line
766 
767 class SortGroupOfLines(FilePreprocessor):
768  '''
769  Sort group of lines matching a regular expression
770  '''
771  def __init__(self, exp):
772  self.exp = exp if hasattr(exp, 'match') else re.compile(exp)
773  def __processFile__(self, lines):
774  match = self.exp.match
775  output = []
776  group = []
777  for l in lines:
778  if match(l):
779  group.append(l)
780  else:
781  if group:
782  group.sort()
783  output.extend(group)
784  group = []
785  output.append(l)
786  return output
787 
788 # Preprocessors for GaudiExamples
789 normalizeExamples = maskPointers + normalizeDate
790 for w,o,r in [
791  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
792  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
793  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
794  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
795  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
796  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
797  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
798  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
799  # Absorb a change in ServiceLocatorHelper
800  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
801  # Remove the leading 0 in Windows' exponential format
802  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
803  # Output line changed in Gaudi v24
804  (None, r'Service reference count check:', r'Looping over all active services...'),
805  # Change of property name in Algorithm (GAUDI-1030)
806  (None, r"Property(.*)'ErrorCount':", r"Property\1'ErrorCounter':"),
807  # Ignore count of declared properties (anyway they are all printed)
808  (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+", r"\1NN"),
809  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
810  normalizeExamples += RegexpReplacer(o,r,w)
811 
812 lineSkipper = LineSkipper(["//GP:",
813  "JobOptionsSvc INFO # ",
814  "JobOptionsSvc WARNING # ",
815  "Time User",
816  "Welcome to",
817  "This machine has a speed",
818  "TIME:",
819  "running on",
820  "ToolSvc.Sequenc... INFO",
821  "DataListenerSvc INFO XML written to file:",
822  "[INFO]","[WARNING]",
823  "DEBUG No writable file catalog found which contains FID:",
824  "0 local", # hack for ErrorLogExample
825  "DEBUG Service base class initialized successfully", # changed between v20 and v21
826  "DEBUG Incident timing:", # introduced with patch #3487
827  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
828  # The signal handler complains about SIGXCPU not defined on some platforms
829  'SIGXCPU',
830  ],regexps = [
831  r"^JobOptionsSvc INFO *$",
832  r"^#", # Ignore python comments
833  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
834  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
835  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
836  r"File '.*.xml' does not exist",
837  r"INFO Refer to dataset .* by its file ID:",
838  r"INFO Referring to dataset .* by its file ID:",
839  r"INFO Disconnect from dataset",
840  r"INFO Disconnected from dataset",
841  r"INFO Disconnected data IO:",
842  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
843  # I want to ignore the header of the unchecked StatusCode report
844  r"^StatusCodeSvc.*listing all unchecked return codes:",
845  r"^StatusCodeSvc\s*INFO\s*$",
846  r"Num\s*\|\s*Function\s*\|\s*Source Library",
847  r"^[-+]*\s*$",
848  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
849  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
850  # Hide unchecked StatusCodes from dictionaries
851  r"^ +[0-9]+ \|.*ROOT",
852  r"^ +[0-9]+ \|.*\|.*Dict",
853  # Hide success StatusCodeSvc message
854  r"StatusCodeSvc.*all StatusCode instances where checked",
855  # Hide EventLoopMgr total timing report
856  r"EventLoopMgr.*---> Loop Finished",
857  # Remove ROOT TTree summary table, which changes from one version to the other
858  r"^\*.*\*$",
859  # Remove Histos Summaries
860  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
861  r"^ \|",
862  r"^ ID=",
863  # Ignore added/removed properties
864  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
865  r"Property(.*)'AuditRe(start|initialize)':", # these were missing in tools
866  r"Property(.*)'IsIOBound':",
867  # ignore uninteresting/obsolete messages
868  r"Property update for OutputLevel : new value =",
869  r"EventLoopMgr\s*DEBUG Creating OutputStream",
870  ] )
871 
872 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
873  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
874  lineSkipper += LineSkipper(regexps = [
875  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
876  ])
877 
878 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
879  normalizeEOL + LineSorter("Services to release : ") +
880  SortGroupOfLines(r'^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':'))
881 
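# Illustrative application of the combined preprocessor chain (the input line
# is hypothetical):
#
#   normalizeExamples('MyAlg INFO created at 0x7f3a12bd5e10')
#   # -> 'MyAlg INFO created at 0x########\n' (pointer masked, EOL normalized)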
882 #--------------------- Validation functions/classes ---------------------#
883 
884 class ReferenceFileValidator:
885  def __init__(self,reffile, cause, result_key, preproc=normalizeExamples):
886  self.reffile = os.path.expandvars(reffile)
887  self.cause=cause
888  self.result_key = result_key
889  self.preproc = preproc
890 
891  def __call__(self,stdout, result) :
892  causes=[]
893  if os.path.isfile(self.reffile):
894  orig=open(self.reffile).xreadlines()
895  if self.preproc:
896  orig = self.preproc(orig)
897  else:
898  orig = []
899  new = stdout.splitlines()
900  if self.preproc:
901  new = self.preproc(new)
902 
903  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
904  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
905  if filterdiffs:
906  result[self.result_key] = result.Quote("\n".join(filterdiffs))
907  result[self.result_key] += result.Quote("""
908  Legend:
909  -) reference file
910  +) standard output of the test""")
911  causes.append(self.cause)
912  return causes
913 
914 def findTTreeSummaries(stdout):
915  """
916  Scan stdout to find ROOT TTree summaries and digest them.
917  """
918  stars = re.compile(r"^\*+$")
919  outlines = stdout.splitlines()
920  nlines = len(outlines)
921  trees = {}
922 
923  i = 0
924  while i < nlines: #loop over the output
925  # look for the beginning of the next TTree summary (a line of '*')
926  while i < nlines and not stars.match(outlines[i]):
927  i += 1
928  if i < nlines:
929  tree, i = _parseTTreeSummary(outlines, i)
930  if tree:
931  trees[tree["Name"]] = tree
932 
933  return trees
934 
935 def cmpTreesDicts(reference, to_check, ignore = None):
936  """
937  Check that all the keys in reference are in to_check too, with the same value.
938  If the value is a dict, the function is called recursively. to_check can
939  contain more keys than reference; those extra keys are not tested.
940  The function returns at the first difference found.
941  """
942  fail_keys = []
943  # filter the keys in the reference dictionary
944  if ignore:
945  ignore_re = re.compile(ignore)
946  keys = [ key for key in reference if not ignore_re.match(key) ]
947  else:
948  keys = reference.keys()
949  # loop over the keys (not ignored) in the reference dictionary
950  for k in keys:
951  if k in to_check: # the key must be in the dictionary to_check
952  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
953  # if both reference and to_check values are dictionaries, recurse
954  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
955  else:
956  # compare the two values
957  failed = to_check[k] != reference[k]
958  else: # handle missing keys in the dictionary to check (i.e. failure)
959  to_check[k] = None
960  failed = True
961  if failed:
962  fail_keys.insert(0, k)
963  break # exit from the loop at the first failure
964  return fail_keys # return the list of keys leading to the differing values
965 
966 def getCmpFailingValues(reference, to_check, fail_path):
967  c = to_check
968  r = reference
969  for k in fail_path:
970  c = c.get(k,None)
971  r = r.get(k,None)
972  if c is None or r is None:
973  break # one of the dictionaries is not deep enough
974  return (fail_path, r, c)
975 
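# Illustrative comparison (the dictionaries are hypothetical):
#
#   ref = {'Hits': {'Entries': 100, 'Total size': 1234}}
#   new = {'Hits': {'Entries': 99, 'Total size': 4321}}
#   cmpTreesDicts(ref, new, ignore=r'.*size')          # -> ['Hits', 'Entries']
#   getCmpFailingValues(ref, new, ['Hits', 'Entries'])
#   # -> (['Hits', 'Entries'], 100, 99)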
976 # signature of the print-out of the histograms
977 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
978 
979 
980 def _parseTTreeSummary(lines, pos):
981  """
982  Parse the TTree summary table in lines, starting from pos.
983  Returns a tuple with the dictionary of digested information and the
984  position of the first line after the summary.
985  """
986  result = {}
987  i = pos + 1 # first line is a sequence of '*'
988  count = len(lines)
989 
990  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
991  def parseblock(ll):
992  r = {}
993  cols = splitcols(ll[0])
994  r["Name"], r["Title"] = cols[1:]
995 
996  cols = splitcols(ll[1])
997  r["Entries"] = int(cols[1])
998 
999  sizes = cols[2].split()
1000  r["Total size"] = int(sizes[2])
1001  if sizes[-1] == "memory":
1002  r["File size"] = 0
1003  else:
1004  r["File size"] = int(sizes[-1])
1005 
1006  cols = splitcols(ll[2])
1007  sizes = cols[2].split()
1008  if cols[0] == "Baskets":
1009  r["Baskets"] = int(cols[1])
1010  r["Basket size"] = int(sizes[2])
1011  r["Compression"] = float(sizes[-1])
1012  return r
1013 
1014  if i < (count - 3) and lines[i].startswith("*Tree"):
1015  result = parseblock(lines[i:i+3])
1016  result["Branches"] = {}
1017  i += 4
1018  while i < (count - 3) and lines[i].startswith("*Br"):
1019  if i < (count - 2) and lines[i].startswith("*Branch "):
1020  # skip branch header
1021  i += 3
1022  continue
1023  branch = parseblock(lines[i:i+3])
1024  result["Branches"][branch["Name"]] = branch
1025  i += 4
1026 
1027  return (result, i)
1028 
1029 def parseHistosSummary(lines, pos):
1030  """
1031  Extract the histogram summaries from the lines starting at pos.
1032  Returns the position of the first line after the summary block.
1033  """
1034  global h_count_re
1035  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
1036  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1037 
1038  nlines = len(lines)
1039 
1040  # decode header
1041  m = h_count_re.search(lines[pos])
1042  name = m.group(1).strip()
1043  total = int(m.group(2))
1044  header = {}
1045  for k, v in [ x.split("=") for x in m.group(3).split() ]:
1046  header[k] = int(v)
1047  pos += 1
1048  header["Total"] = total
1049 
1050  summ = {}
1051  while pos < nlines:
1052  m = h_table_head.search(lines[pos])
1053  if m:
1054  t, d = m.groups(1) # type and directory
1055  t = t.replace(" profile", "Prof")
1056  pos += 1
1057  if pos < nlines:
1058  l = lines[pos]
1059  else:
1060  l = ""
1061  cont = {}
1062  if l.startswith(" | ID"):
1063  # table format
1064  titles = [ x.strip() for x in l.split("|")][1:]
1065  pos += 1
1066  while pos < nlines and lines[pos].startswith(" |"):
1067  l = lines[pos]
1068  values = [ x.strip() for x in l.split("|")][1:]
1069  hcont = {}
1070  for i in range(len(titles)):
1071  hcont[titles[i]] = values[i]
1072  cont[hcont["ID"]] = hcont
1073  pos += 1
1074  elif l.startswith(" ID="):
1075  while pos < nlines and lines[pos].startswith(" ID="):
1076  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
1077  cont[values[0]] = values
1078  pos += 1
1079  else: # not interpreted
1080  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
1081  if not d in summ:
1082  summ[d] = {}
1083  summ[d][t] = cont
1084  summ[d]["header"] = header
1085  else:
1086  break
1087  if not summ:
1088  # If the full table is not present, we use only the header
1089  summ[name] = {"header": header}
1090  return summ, pos
1091 
1092 
1093 
1094 def findHistosSummaries(stdout):
1095  """
1096  Scan stdout to find ROOT TTree summaries and digest them.
1097  """
1098  outlines = stdout.splitlines()
1099  nlines = len(outlines) - 1
1100  summaries = {}
1101  global h_count_re
1102 
1103  pos = 0
1104  while pos < nlines:
1105  summ = {}
1106  # find first line of block:
1107  match = h_count_re.search(outlines[pos])
1108  while pos < nlines and not match:
1109  pos += 1
1110  match = h_count_re.search(outlines[pos])
1111  if match:
1112  summ, pos = parseHistosSummary(outlines, pos)
1113  summaries.update(summ)
1114  return summaries
1115 
1116 def PlatformIsNotSupported(self, context, result):
1117  platform = GetPlatform(self)
1118  unsupported = [ re.compile(x) for x in [ str(y).strip() for y in self.unsupported_platforms ] if x]
1119  for p_re in unsupported :
1120  if p_re.search(platform):
1121  result.SetOutcome(result.UNTESTED)
1122  result[result.CAUSE] = 'Platform not supported.'
1123  return True
1124  return False
1125 
1126 def GetPlatform(self):
1127  """
1128  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1129  """
1130  arch = "None"
1131  # check architecture name
1132  if "BINARY_TAG" in os.environ:
1133  arch = os.environ["BINARY_TAG"]
1134  elif "CMTCONFIG" in os.environ:
1135  arch = os.environ["CMTCONFIG"]
1136  elif "SCRAM_ARCH" in os.environ:
1137  arch = os.environ["SCRAM_ARCH"]
1138  return arch
1139 
1140 def isWinPlatform(self):
1141  """
1142  Return True if the current platform is Windows.
1143 
1144  This function was needed because of the change in the CMTCONFIG format,
1145  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1146  """
1147  platform = GetPlatform(self)
1148  return "winxp" in platform or platform.startswith("win")