BaseTest.py
1 # -*- coding: utf-8 -*-
2 
3 import os
4 import sys
5 import time
6 import signal
7 import threading
8 import platform
9 import tempfile
10 import inspect
11 import re
12 import logging
13 
14 from subprocess import Popen, PIPE, STDOUT
15 
16 def sanitize_for_xml(data):
17  '''
18  Take a string with invalid ASCII/UTF characters and quote them so that the
19  string can be used in an XML text.
20 
21  >>> sanitize_for_xml('this is \x1b')
22  'this is [NON-XML-CHAR-0x1B]'
23  '''
24  bad_chars = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
25  def quote(match):
26  'helper function'
27  return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())
28  return bad_chars.sub(quote, data)
29 
30 #-------------------------------------------------------------------------#
31 class BaseTest(object):
32 
33  _common_tmpdir = None
34 
35  def __init__(self):
36  self.program = ''
37  self.args = []
38  self.reference = ''
39  self.error_reference = ''
40  self.options = ''
41  self.stderr = ''
42  self.timeout = 600
43  self.exit_code = None
44  self.environment = None
45  self.unsupported_platforms = []
46  self.signal = None
47  self.workdir = os.curdir
48  self.use_temp_dir = False
49  #Variables not for users
50  self.status = None
51  self.name = ''
52  self.causes = []
53  self.result = Result(self)
54  self.returnedCode = 0
55  self.out = ''
56  self.err = ''
57  self.proc = None
58  self.stack_trace = None
59  self.basedir = os.getcwd()
60 
61  def validator(self, stdout='',stderr=''):
62  pass
63 
64  def run(self):
65  logging.debug('running test %s', self.name)
66 
67  if self.options:
68  if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
69  r'from\s+Configurables\s+import', self.options):
70  optionFile = tempfile.NamedTemporaryFile(suffix='.py')
71  else:
72  optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
73  optionFile.file.write(self.options)
74  optionFile.seek(0)
75  self.args.append(RationalizePath(optionFile.name))
76 
77  # If no environment is specified, use the current one; otherwise extend the given one with os.environ
78  if self.environment is None : self.environment = os.environ
79  else : self.environment=dict(self.environment.items()+os.environ.items())
80 
81  platform_id = (os.environ.get('BINARY_TAG') or
82  os.environ.get('CMTCONFIG') or
83  platform.platform())
84  # If at least one regex matches we skip the test.
85  skip_test = bool([None
86  for prex in self.unsupported_platforms
87  if re.search(prex, platform_id)])
88 
89  if not skip_test:
90  # handle working/temporary directory options
91  workdir = self.workdir
92  if self.use_temp_dir:
93  if self._common_tmpdir:
94  workdir = self._common_tmpdir
95  else:
96  workdir = tempfile.mkdtemp()
97 
98  # prepare the command to execute
99  prog=''
100  if self.program != '':
101  prog = self.program
102  elif "GAUDIEXE" in os.environ :
103  prog = os.environ["GAUDIEXE"]
104  else :
105  prog = "Gaudi.exe"
106 
107  dummy, prog_ext = os.path.splitext(prog)
108  if prog_ext not in [ ".exe", ".py", ".bat" ]:
109  prog += ".exe"
110  prog_ext = ".exe"
111 
112  prog = which(prog) or prog
113 
114  args = map(RationalizePath, self.args)
115 
116  if prog_ext == ".py" :
117  params = ['python', RationalizePath(prog)] + args
118  else :
119  params = [RationalizePath(prog)] + args
120 
121  validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
122  'RESOURCE': None, 'TARGET': None,
123  'TRACEBACK': None, 'START_TIME': None,
124  'END_TIME': None, 'TIMEOUT_DETAIL': None})
125  self.result = validatorRes
126 
127  # we need to switch directory because the validator expects to run
128  # in the same dir as the program
129  os.chdir(workdir)
130 
131  # launch the test in a separate thread so that the timeout can be enforced
132  def target() :
133  logging.debug('executing %r in %s',
134  params, workdir)
135  self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
136  env=self.environment)
137  self.out, self.err = self.proc.communicate()
138 
139  thread = threading.Thread(target=target)
140  thread.start()
141  #catching timeout
142  thread.join(self.timeout)
143 
144  if thread.is_alive():
145  # get the stack trace of the stuck process
146  cmd = ['gdb', '-p', str(self.proc.pid), '-n', '-q']
147  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
148  self.stack_trace = gdb.communicate('set pagination off\n'
149  'set confirm off\n'
150  'thread apply all backtrace\n'
151  'quit\n')[0]
152 
153  self.proc.send_signal(signal.SIGKILL)
154  logging.debug('time out in test %s', self.name)
155  thread.join()
156  self.causes.append('timeout')
157  else:
158  logging.debug('completed test %s', self.name)
159 
160  #Getting the error code
161  logging.debug('returnedCode = %s', self.proc.returncode)
162  self.returnedCode = self.proc.returncode
163 
164  logging.debug('validating test...')
165  self.result, self.causes = self.ValidateOutput(stdout=self.out,
166  stderr=self.err,
167  result=validatorRes)
168 
169  # remove the temporary directory if we created it
170  if self.use_temp_dir and not self._common_tmpdir:
171  shutil.rmtree(workdir, True)
172 
173  os.chdir(self.basedir)
174 
175  # handle application exit code
176  if self.signal is not None:
177  if int(self.returnedCode) != -int(self.signal):
178  self.causes.append('exit code')
179 
180  elif self.exit_code is not None:
181  if int(self.returnedCode) != int(self.exit_code):
182  self.causes.append('exit code')
183 
184  elif self.returnedCode != 0:
185  self.causes.append("exit code")
186 
187  if self.causes:
188  self.status = "failed"
189  else:
190  self.status = "passed"
191 
192  else:
193  self.status = "skipped"
194 
195  logging.debug('%s: %s', self.name, self.status)
196  field_mapping = {'Exit Code': 'returnedCode',
197  'stderr': 'err',
198  'Arguments': 'args',
199  'Environment': 'environment',
200  'Status': 'status',
201  'stdout': 'out',
202  'Program Name': 'program',
203  'Name': 'name',
204  'Validator': 'validator',
205  'Output Reference File': 'reference',
206  'Error Reference File': 'error_reference',
207  'Causes': 'causes',
208  #'Validator Result': 'result.annotations',
209  'Unsupported Platforms': 'unsupported_platforms',
210  'Stack Trace': 'stack_trace'}
211  resultDict = [(key, getattr(self, attr))
212  for key, attr in field_mapping.iteritems()
213  if getattr(self, attr)]
214  resultDict.append(('Working Directory',
215  RationalizePath(os.path.join(os.getcwd(),
216  self.workdir))))
217  #print dict(resultDict).keys()
218  resultDict.extend(self.result.annotations.iteritems())
219  #print self.result.annotations.keys()
220  return dict(resultDict)
221 
222 
223  #-------------------------------------------------#
224  #----------------Validating tool------------------#
225  #-------------------------------------------------#
226 
227  def ValidateOutput(self, stdout, stderr, result):
228  # checking if default validation or not
229  if self.validator is not BaseTest.validator:
230  self.validator(stdout, stderr, result, self.causes,
231  self.reference, self.error_reference)
232  else:
233  if self.stderr == '':
234  self.validateWithReference(stdout, stderr, result, self.causes)
235  elif stderr.strip() != self.stderr.strip():
236  self.causes.append('standard error')
237 
238 
239  return result, self.causes
240 
241 
242 
243  def findReferenceBlock(self,reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id = None):
244  """
245  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as the 'signature' argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
246  """
247 
248  if reference is None : reference=self.reference
249  if stdout is None : stdout=self.out
250  if result is None : result=self.result
251  if causes is None : causes=self.causes
252 
253  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
254  if not reflines:
255  raise RuntimeError("Empty (or null) reference")
256  # the same on standard output
257  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
258 
259  res_field = "GaudiTest.RefBlock"
260  if id:
261  res_field += "_%s" % id
262 
263  if signature is None:
264  if signature_offset < 0:
265  signature_offset = len(reflines)+signature_offset
266  signature = reflines[signature_offset]
267  # find the reference block in the output file
268  try:
269  pos = outlines.index(signature)
270  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
271  if reflines != outlines:
272  msg = "standard output"
273  # I do not want 2 messages in causes if the function is called twice
274  if not msg in causes:
275  causes.append(msg)
276  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
277  except ValueError:
278  causes.append("missing signature")
279  result[res_field + ".signature"] = result.Quote(signature)
280  if len(reflines) > 1 or signature != reflines[0]:
281  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
282  return causes
283 
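A minimal usage sketch of findReferenceBlock as it could be called from a custom validator; the test instance, the block content and the id below are hypothetical, not part of this module:

    # 'test' is assumed to be a BaseTest instance whose run() has completed
    expected_block = '\n'.join([
        "ApplicationMgr       INFO Application Manager Initialized successfully",
        "MyAlgorithm          INFO initialization completed",
    ])
    test.findReferenceBlock(reference=expected_block, id='init_block')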
284  def countErrorLines(self, expected = {'ERROR':0, 'FATAL':0}, stdout=None, result=None,causes=None):
285  """
286  Count the number of messages with required severity (by default ERROR and FATAL)
287  and check if their numbers match the expected ones (0 by default).
288  The dictionary "expected" can be used to tune the number of errors and fatals
289  allowed, or to limit the number of expected warnings etc.
290  """
291 
292  if stdout is None : stdout=self.out
293  if result is None : result=self.result
294  if causes is None : causes=self.causes
295 
296  # prepare the dictionary to record the extracted lines
297  errors = {}
298  for sev in expected:
299  errors[sev] = []
300 
301  outlines = stdout.splitlines()
302  from math import log10
303  fmt = "%%%dd - %%s" % (int(log10(len(outlines)+1)))
304 
305  linecount = 0
306  for l in outlines:
307  linecount += 1
308  words = l.split()
309  if len(words) >= 2 and words[1] in errors:
310  errors[words[1]].append(fmt%(linecount,l.rstrip()))
311 
312  for e in errors:
313  if len(errors[e]) != expected[e]:
314  causes.append('%s(%d)'%(e,len(errors[e])))
315  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
316  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
317 
318  return causes
319 
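For illustration only, a sketch of calling countErrorLines from a validator when exactly one ERROR, no FATAL and two WARNING messages are expected (the counts are made up, not defaults of this module):

    test.countErrorLines(expected={'ERROR': 1, 'FATAL': 0, 'WARNING': 2})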
320  def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
321  trees_dict = None,
322  ignore = r"Basket|.*size|Compression"):
323  """
324  Compare the TTree summaries in stdout with the ones in trees_dict or in
325  the reference file. By default ignore the size, compression and basket
326  fields.
327  The presence of TTree summaries when none is expected is not a failure.
328  """
329  if stdout is None : stdout=self.out
330  if result is None : result=self.result
331  if causes is None : causes=self.causes
332  if trees_dict is None:
333  lreference = self._expandReferenceFileName(self.reference)
334  # call the validator if the file exists
335  if lreference and os.path.isfile(lreference):
336  trees_dict = findTTreeSummaries(open(lreference).read())
337  else:
338  trees_dict = {}
339 
340  from pprint import PrettyPrinter
341  pp = PrettyPrinter()
342  if trees_dict:
343  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
344  if ignore:
345  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
346 
347  trees = findTTreeSummaries(stdout)
348  failed = cmpTreesDicts(trees_dict, trees, ignore)
349  if failed:
350  causes.append("trees summaries")
351  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
352  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
353  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
354 
355  return causes
356 
357  def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
358  dict = None,
359  ignore = None):
360  """
361  Compare the histogram summaries in stdout with the ones in 'dict' or in
362  the reference file. By default nothing is ignored.
363  The presence of histogram summaries when none is expected is not a
364  failure.
365  """
366  if stdout is None : stdout=self.out
367  if result is None : result=self.result
368  if causes is None : causes=self.causes
369 
370  if dict is None:
371  lreference = self._expandReferenceFileName(self.reference)
372  # call the validator if the file exists
373  if lreference and os.path.isfile(lreference):
374  dict = findHistosSummaries(open(lreference).read())
375  else:
376  dict = {}
377 
378  from pprint import PrettyPrinter
379  pp = PrettyPrinter()
380  if dict:
381  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
382  if ignore:
383  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
384 
385  histos = findHistosSummaries(stdout)
386  failed = cmpTreesDicts(dict, histos, ignore)
387  if failed:
388  causes.append("histos summaries")
389  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
390  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
391  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
392 
393  return causes
394 
395  def validateWithReference(self, stdout=None, stderr=None, result=None,
396  causes=None, preproc=None):
397  '''
398  Default validation action: compare standard output and error to the
399  reference files.
400  '''
401 
402  if stdout is None : stdout = self.out
403  if stderr is None : stderr = self.err
404  if result is None : result = self.result
405  if causes is None : causes = self.causes
406 
407  # set the default output preprocessor
408  if preproc is None:
409  preproc = normalizeExamples
410  # check standard output
411  lreference = self._expandReferenceFileName(self.reference)
412  # call the validator if the file exists
413  if lreference and os.path.isfile(lreference):
414  causes += ReferenceFileValidator(lreference,
415  "standard output",
416  "Output Diff",
417  preproc=preproc)(stdout, result)
418  # Compare TTree summaries
419  causes = self.CheckTTreesSummaries(stdout, result, causes)
420  causes = self.CheckHistosSummaries(stdout, result, causes)
421  if causes: # Write a new reference file for stdout
422  try:
423  newref = open(lreference + ".new","w")
424  # sanitize newlines
425  for l in stdout.splitlines():
426  newref.write(l.rstrip() + '\n')
427  del newref # flush and close
428  except IOError:
429  # Ignore IO errors when trying to update reference files
430  # because we may be in a read-only filesystem
431  pass
432 
433  # check standard error
434  lreference = self._expandReferenceFileName(self.error_reference)
435  # call the validator if we have a file to use
436  if lreference and os.path.isfile(lreference):
437  newcauses = ReferenceFileValidator(lreference,
438  "standard error",
439  "Error Diff",
440  preproc=preproc)(stderr, result)
441  causes += newcauses
442  if newcauses: # Write a new reference file for stderr
443  newref = open(lreference + ".new","w")
444  # sanitize newlines
445  for l in stderr.splitlines():
446  newref.write(l.rstrip() + '\n')
447  del newref # flush and close
448  else:
449  causes += BasicOutputValidator(lreference, "standard error", "ExecTest.expected_stderr")(stderr, result)
450  return causes
451 
452  def _expandReferenceFileName(self, reffile):
453  # if no file is passed, do nothing
454  if not reffile:
455  return ""
456 
457  # function to split an extension in constituents parts
458  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
459 
460  reference = os.path.normpath(os.path.join(self.basedir,
461  os.path.expandvars(reffile)))
462 
463  # old-style platform-specific reference name
464  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
465  if os.path.isfile(spec_ref):
466  reference = spec_ref
467  else: # look for new-style platform specific reference files:
468  # get all the files whose name start with the reference filename
469  dirname, basename = os.path.split(reference)
470  if not dirname: dirname = '.'
471  head = basename + "."
472  head_len = len(head)
473  platform = platformSplit(GetPlatform(self))
474  if 'do0' in platform:
475  platform.add('dbg')
476  candidates = []
477  for f in os.listdir(dirname):
478  if f.startswith(head):
479  req_plat = platformSplit(f[head_len:])
480  if platform.issuperset(req_plat):
481  candidates.append( (len(req_plat), f) )
482  if candidates: # take the one with highest matching
483  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
484  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
485  candidates.sort()
486  reference = os.path.join(dirname, candidates[-1][1])
487  return reference
488 
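As an illustration of the platform-specific look-up performed by _expandReferenceFileName (all file names here are hypothetical):

    # with self.reference = 'refs/job.ref' and platform 'x86_64-slc6-gcc48-opt',
    # the candidates considered are, in order:
    #   refs/job.x86ref            (old style: first 3 platform characters before the extension)
    #   refs/job.ref.x86_64        (new style: the dash/underscore-separated tags must all
    #   refs/job.ref.slc6-gcc48     appear in the platform string)
    # among the new-style candidates, the one matching the largest number of tags wins.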
489 #---------------------------------------------------------------------------------------------------#
490 #---------------------------------------------------------------------------------------------------#
491 #-----------------------------------------GAUDI TOOLS-----------------------------------------------------#
492 #---------------------------------------------------------------------------------------------------#
493 #---------------------------------------------------------------------------------------------------#
494 
495 import shutil
496 import string
497 import difflib
498 import calendar
499 
500 try:
501  from GaudiKernel import ROOT6WorkAroundEnabled
502 except ImportError:
503  def ROOT6WorkAroundEnabled(id=None):
504  # dummy implementation
505  return False
506 
507 #--------------------------------- TOOLS ---------------------------------#
508 
510  """
511  Function used to normalize the used path
512  """
513  newPath = os.path.normpath(os.path.expandvars(p))
514  if os.path.exists(newPath) :
515  p = os.path.realpath(newPath)
516  return p
517 
518 
519 def which(executable):
520  """
521  Locates an executable in the executable search path ($PATH) and returns the
522  full path to it. The application is looked for with or without the '.exe' suffix.
523  If the executable cannot be found, None is returned.
524  """
525  if os.path.isabs(executable):
526  if not os.path.exists(executable):
527  if executable.endswith('.exe'):
528  if os.path.exists(executable[:-4]):
529  return executable[:-4]
530  else :
531  head,executable = os.path.split(executable)
532  else :
533  return executable
534  for d in os.environ.get("PATH").split(os.pathsep):
535  fullpath = os.path.join(d, executable)
536  if os.path.exists(fullpath):
537  return fullpath
538  if executable.endswith('.exe'):
539  return which(executable[:-4])
540  return None
541 
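A quick usage sketch of which() (the paths in the comments are hypothetical):

    gaudi_exe = which('Gaudi.exe')     # e.g. '/some/install/bin/Gaudi.exe', or None if not on $PATH
    env_exe   = which('/usr/bin/env')  # existing absolute paths are returned unchanged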
542 
543 
544 #-------------------------------------------------------------------------#
545 #----------------------------- Result Class ------------------------------#
546 #-------------------------------------------------------------------------#
547 import types
548 
549 class Result:
550 
551  PASS='PASS'
552  FAIL='FAIL'
553  ERROR='ERROR'
554  UNTESTED='UNTESTED'
555 
556  EXCEPTION = ""
557  RESOURCE = ""
558  TARGET = ""
559  TRACEBACK = ""
560  START_TIME = ""
561  END_TIME = ""
562  TIMEOUT_DETAIL = ""
563 
564  def __init__(self,kind=None,id=None,outcome=PASS,annotations={}):
565  self.annotations = annotations.copy()
566 
567  def __getitem__(self,key):
568  assert type(key) in types.StringTypes
569  return self.annotations[key]
570 
571  def __setitem__(self,key,value):
572  assert type(key) in types.StringTypes
573  assert type(value) in types.StringTypes
574  self.annotations[key]=value
575 
576  def Quote(self,string):
577  return string
578 
579 
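A minimal sketch of how validators use Result: keys and values must be strings, and Quote() is a (currently pass-through) hook applied to long text before it is stored:

    res = Result()
    res['GaudiTest.example'] = res.Quote('some captured output')  # kept in res.annotations
    assert res['GaudiTest.example'] == 'some captured output'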
580 #-------------------------------------------------------------------------#
581 #--------------------------- Validator Classes ---------------------------#
582 #-------------------------------------------------------------------------#
583 
584 # Basic implementation of an option validator for Gaudi tests. This implementation is based on the standard (LCG) validation functions used in QMTest.
585 
586 
587 class BasicOutputValidator:
588
589  def __init__(self,ref,cause,result_key):
590  self.ref=ref
591  self.cause=cause
592  self.result_key=result_key
593 
594  def __call__(self,out,result):
595  """Validate the output of the program.
596  'out' -- A string containing the data written to the standard output
597  stream by the program under test.
598  'result' -- A 'Result' object. It may be used to annotate
599  the outcome according to the content of the output; the annotations
600  end up in the final test report.
601
602  returns -- A list of strings giving causes of failure."""
603 
604  causes=[]
605  #Check the output
606  if not self.__CompareText(out,self.ref):
607  causes.append(self.cause)
608  result[self.result_key] =result.Quote(self.ref)
609 
610 
611 
612  return causes
613 
614  def __CompareText(self, s1, s2):
615  """Compare 's1' and 's2', ignoring line endings.
616  's1' -- A string.
617  's2' -- A string.
618  returns -- True if 's1' and 's2' are the same, ignoring
619  differences in line endings."""
620  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
621  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
622  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
623  keep_line = lambda l: not to_ignore.match(l)
624  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
625  else:
626  return s1.splitlines() == s2.splitlines()
627 
628 
629 
630 #------------------------ Preprocessor elements ------------------------#
632  """ Base class for a callable that takes a file and returns a modified
633  version of it."""
634  def __processLine__(self, line):
635  return line
636  def __call__(self, input):
637  if hasattr(input,"__iter__"):
638  lines = input
639  mergeback = False
640  else:
641  lines = input.splitlines()
642  mergeback = True
643  output = []
644  for l in lines:
645  l = self.__processLine__(l)
646  if l: output.append(l)
647  if mergeback: output = '\n'.join(output)
648  return output
649  def __add__(self, rhs):
650  return FilePreprocessorSequence([self,rhs])
651 
652 class FilePreprocessorSequence(FilePreprocessor):
653  def __init__(self, members = []):
654  self.members = members
655  def __add__(self, rhs):
656  return FilePreprocessorSequence(self.members + [rhs])
657  def __call__(self, input):
658  output = input
659  for pp in self.members:
660  output = pp(output)
661  return output
662 
663 class LineSkipper(FilePreprocessor):
664  def __init__(self, strings = [], regexps = []):
665  import re
666  self.strings = strings
667  self.regexps = map(re.compile,regexps)
668 
669  def __processLine__(self, line):
670  for s in self.strings:
671  if line.find(s) >= 0: return None
672  for r in self.regexps:
673  if r.search(line): return None
674  return line
675 
676 class BlockSkipper(FilePreprocessor):
677  def __init__(self, start, end):
678  self.start = start
679  self.end = end
680  self._skipping = False
681 
682  def __processLine__(self, line):
683  if self.start in line:
684  self._skipping = True
685  return None
686  elif self.end in line:
687  self._skipping = False
688  elif self._skipping:
689  return None
690  return line
691 
692 class RegexpReplacer(FilePreprocessor):
693  def __init__(self, orig, repl = "", when = None):
694  if when:
695  when = re.compile(when)
696  self._operations = [ (when, re.compile(orig), repl) ]
697  def __add__(self,rhs):
698  if isinstance(rhs, RegexpReplacer):
699  res = RegexpReplacer("","",None)
700  res._operations = self._operations + rhs._operations
701  else:
702  res = FilePreprocessor.__add__(self, rhs)
703  return res
704  def __processLine__(self, line):
705  for w,o,r in self._operations:
706  if w is None or w.search(line):
707  line = o.sub(r, line)
708  return line
709 
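A small sketch of composing preprocessors with '+' (the pattern and input are made up for illustration):

    cleanup = LineSkipper(['DEBUG']) + RegexpReplacer(r'\d{4}-\d{2}-\d{2}', 'YYYY-MM-DD')
    print cleanup("INFO  run started 2024-01-01\nDEBUG internal detail")
    # -> 'INFO  run started YYYY-MM-DD'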
710 # Common preprocessors
711 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
712 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
713  "00:00:00 1970-01-01")
714 normalizeEOL = FilePreprocessor()
715 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
716 
717 skipEmptyLines = FilePreprocessor()
718 # FIXME: that's ugly
719 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
720 
721 ## Special preprocessor sorting the list of strings (whitespace separated)
722 # that follow a signature on a single line
723 class LineSorter(FilePreprocessor):
724  def __init__(self, signature):
725  self.signature = signature
726  self.siglen = len(signature)
727  def __processLine__(self, line):
728  pos = line.find(self.signature)
729  if pos >=0:
730  lst = line[(pos+self.siglen):].split()
731  lst.sort()
732  line = line[:(pos+self.siglen)]
733  line += " ".join(lst)
734  return line
735 
736 # Preprocessors for GaudiExamples
737 normalizeExamples = maskPointers + normalizeDate
738 for w,o,r in [
739  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
740  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
741  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
742  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
743  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
744  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
745  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
746  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
747  # Absorb a change in ServiceLocatorHelper
748  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
749  # Remove the leading 0 in Windows' exponential format
750  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
751  # Output line changed in Gaudi v24
752  (None, r'Service reference count check:', r'Looping over all active services...'),
753  # Change of property name in Algorithm (GAUDI-1030)
754  (None, r"Property(.*)'ErrorCount':", r"Property\1'ErrorCounter':"),
755  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
756  normalizeExamples += RegexpReplacer(o,r,w)
757 
758 lineSkipper = LineSkipper(["//GP:",
759  "JobOptionsSvc INFO # ",
760  "JobOptionsSvc WARNING # ",
761  "Time User",
762  "Welcome to",
763  "This machine has a speed",
764  "TIME:",
765  "running on",
766  "ToolSvc.Sequenc... INFO",
767  "DataListenerSvc INFO XML written to file:",
768  "[INFO]","[WARNING]",
769  "DEBUG No writable file catalog found which contains FID:",
770  "0 local", # hack for ErrorLogExample
771  "DEBUG Service base class initialized successfully", # changed between v20 and v21
772  "DEBUG Incident timing:", # introduced with patch #3487
773  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
774  # The signal handler complains about SIGXCPU not defined on some platforms
775  'SIGXCPU',
776  ],regexps = [
777  r"^JobOptionsSvc INFO *$",
778  r"^#", # Ignore python comments
779  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
780  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
781  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
782  r"File '.*.xml' does not exist",
783  r"INFO Refer to dataset .* by its file ID:",
784  r"INFO Referring to dataset .* by its file ID:",
785  r"INFO Disconnect from dataset",
786  r"INFO Disconnected from dataset",
787  r"INFO Disconnected data IO:",
788  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
789  # I want to ignore the header of the unchecked StatusCode report
790  r"^StatusCodeSvc.*listing all unchecked return codes:",
791  r"^StatusCodeSvc\s*INFO\s*$",
792  r"Num\s*\|\s*Function\s*\|\s*Source Library",
793  r"^[-+]*\s*$",
794  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
795  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
796  # Hide unchecked StatusCodes from dictionaries
797  r"^ +[0-9]+ \|.*ROOT",
798  r"^ +[0-9]+ \|.*\|.*Dict",
799  # Hide success StatusCodeSvc message
800  r"StatusCodeSvc.*all StatusCode instances where checked",\
801  # Remove ROOT TTree summary table, which changes from one version to the other
802  r"^\*.*\*$",
803  # Remove Histos Summaries
804  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
805  r"^ \|",
806  r"^ ID=",
807  ] )
808 
809 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
810  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
811  lineSkipper += LineSkipper(regexps = [
812  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
813  ])
814 
815 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
816  normalizeEOL + LineSorter("Services to release : "))
817 
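A sketch of the combined preprocessor in action (the log lines below are invented); pointers and timestamps are masked, known noise lines dropped and empty lines removed:

    sample = ("ApplicationMgr       INFO started at 12:34:56 2016-05-04 UTC\n"
              "ToolSvc.Sequenc... INFO this line is dropped\n"
              "MyAlg                INFO object at 0x7f3a2c001234\n")
    print normalizeExamples(sample)
    # -> roughly:
    # ApplicationMgr       INFO started at 00:00:00 1970-01-01
    # MyAlg                INFO object at 0x########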
818 
819 #--------------------- Validation functions/classes ---------------------#
820 
821 class ReferenceFileValidator:
822  def __init__(self,reffile, cause, result_key, preproc=normalizeExamples):
823  self.reffile = os.path.expandvars(reffile)
824  self.cause=cause
825  self.result_key = result_key
826  self.preproc = preproc
827 
828  def __call__(self,stdout, result) :
829  causes=[]
830  if os.path.isfile(self.reffile):
831  orig=open(self.reffile).xreadlines()
832  if self.preproc:
833  orig = self.preproc(orig)
834  else:
835  orig = []
836  new = stdout.splitlines()
837  if self.preproc:
838  new = self.preproc(new)
839 
840  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
841  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
842  if filterdiffs:
843  result[self.result_key] = result.Quote("\n".join(filterdiffs))
844  result[self.result_key] += result.Quote("""
845  Legend:
846  -) reference file
847  +) standard output of the test""")
848  causes.append(self.cause)
849  return causes
850 
851 def findTTreeSummaries(stdout):
852  """
853  Scan stdout to find ROOT TTree summaries and digest them.
854  """
855  stars = re.compile(r"^\*+$")
856  outlines = stdout.splitlines()
857  nlines = len(outlines)
858  trees = {}
859 
860  i = 0
861  while i < nlines: #loop over the output
862  # look for
863  while i < nlines and not stars.match(outlines[i]):
864  i += 1
865  if i < nlines:
866  tree, i = _parseTTreeSummary(outlines, i)
867  if tree:
868  trees[tree["Name"]] = tree
869 
870  return trees
871 
872 def cmpTreesDicts(reference, to_check, ignore = None):
873  """
874  Check that all the keys in reference are in to_check too, with the same value.
875  If the value is a dict, the function is called recursively. to_check can
876  contain more keys than reference; the extra keys are not tested.
877  The function returns at the first difference found.
878  """
879  fail_keys = []
880  # filter the keys in the reference dictionary
881  if ignore:
882  ignore_re = re.compile(ignore)
883  keys = [ key for key in reference if not ignore_re.match(key) ]
884  else:
885  keys = reference.keys()
886  # loop over the keys (not ignored) in the reference dictionary
887  for k in keys:
888  if k in to_check: # the key must be in the dictionary to_check
889  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
890  # if both reference and to_check values are dictionaries, recurse
891  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
892  else:
893  # compare the two values
894  failed = to_check[k] != reference[k]
895  else: # handle missing keys in the dictionary to check (i.e. failure)
896  to_check[k] = None
897  failed = True
898  if failed:
899  fail_keys.insert(0, k)
900  break # exit from the loop at the first failure
901  return fail_keys # return the list of keys bringing to the different values
902 
903 def getCmpFailingValues(reference, to_check, fail_path):
904  c = to_check
905  r = reference
906  for k in fail_path:
907  c = c.get(k,None)
908  r = r.get(k,None)
909  if c is None or r is None:
910  break # one of the dictionaries is not deep enough
911  return (fail_path, r, c)
912 
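A small, self-contained sketch of cmpTreesDicts and getCmpFailingValues on hand-made dictionaries (the tree content is invented):

    ref   = {'MyTree': {'Entries': 100, 'Branches': {'px': {'Entries': 100}}}}
    found = {'MyTree': {'Entries': 100, 'Branches': {'px': {'Entries':  99}}}}
    failed = cmpTreesDicts(ref, found)             # -> ['MyTree', 'Branches', 'px', 'Entries']
    print getCmpFailingValues(ref, found, failed)  # -> (['MyTree', ...], 100, 99)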
913 # signature of the print-out of the histograms
914 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
915 
916 
917 def _parseTTreeSummary(lines, pos):
918  """
919  Parse the TTree summary table in lines, starting from pos.
920  Returns a tuple with the dictionary of digested information and the
921  position of the first line after the summary.
922  """
923  result = {}
924  i = pos + 1 # first line is a sequence of '*'
925  count = len(lines)
926 
927  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
928  def parseblock(ll):
929  r = {}
930  cols = splitcols(ll[0])
931  r["Name"], r["Title"] = cols[1:]
932 
933  cols = splitcols(ll[1])
934  r["Entries"] = int(cols[1])
935 
936  sizes = cols[2].split()
937  r["Total size"] = int(sizes[2])
938  if sizes[-1] == "memory":
939  r["File size"] = 0
940  else:
941  r["File size"] = int(sizes[-1])
942 
943  cols = splitcols(ll[2])
944  sizes = cols[2].split()
945  if cols[0] == "Baskets":
946  r["Baskets"] = int(cols[1])
947  r["Basket size"] = int(sizes[2])
948  r["Compression"] = float(sizes[-1])
949  return r
950 
951  if i < (count - 3) and lines[i].startswith("*Tree"):
952  result = parseblock(lines[i:i+3])
953  result["Branches"] = {}
954  i += 4
955  while i < (count - 3) and lines[i].startswith("*Br"):
956  if i < (count - 2) and lines[i].startswith("*Branch "):
957  # skip branch header
958  i += 3
959  continue
960  branch = parseblock(lines[i:i+3])
961  result["Branches"][branch["Name"]] = branch
962  i += 4
963 
964  return (result, i)
965 
966 def parseHistosSummary(lines, pos):
967  """
968  Extract the histograms infos from the lines starting at pos.
969  Returns the position of the first line after the summary block.
970  """
971  global h_count_re
972  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
973  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
974 
975  nlines = len(lines)
976 
977  # decode header
978  m = h_count_re.search(lines[pos])
979  name = m.group(1).strip()
980  total = int(m.group(2))
981  header = {}
982  for k, v in [ x.split("=") for x in m.group(3).split() ]:
983  header[k] = int(v)
984  pos += 1
985  header["Total"] = total
986 
987  summ = {}
988  while pos < nlines:
989  m = h_table_head.search(lines[pos])
990  if m:
991  t, d = m.groups(1) # type and directory
992  t = t.replace(" profile", "Prof")
993  pos += 1
994  if pos < nlines:
995  l = lines[pos]
996  else:
997  l = ""
998  cont = {}
999  if l.startswith(" | ID"):
1000  # table format
1001  titles = [ x.strip() for x in l.split("|")][1:]
1002  pos += 1
1003  while pos < nlines and lines[pos].startswith(" |"):
1004  l = lines[pos]
1005  values = [ x.strip() for x in l.split("|")][1:]
1006  hcont = {}
1007  for i in range(len(titles)):
1008  hcont[titles[i]] = values[i]
1009  cont[hcont["ID"]] = hcont
1010  pos += 1
1011  elif l.startswith(" ID="):
1012  while pos < nlines and lines[pos].startswith(" ID="):
1013  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
1014  cont[values[0]] = values
1015  pos += 1
1016  else: # not interpreted
1017  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
1018  if not d in summ:
1019  summ[d] = {}
1020  summ[d][t] = cont
1021  summ[d]["header"] = header
1022  else:
1023  break
1024  if not summ:
1025  # If the full table is not present, we use only the header
1026  summ[name] = {"header": header}
1027  return summ, pos
1028 
1029 
1030 
1032  """
1033  Scan stdout to find ROOT TTree summaries and digest them.
1034  """
1035  outlines = stdout.splitlines()
1036  nlines = len(outlines) - 1
1037  summaries = {}
1038  global h_count_re
1039 
1040  pos = 0
1041  while pos < nlines:
1042  summ = {}
1043  # find first line of block:
1044  match = h_count_re.search(outlines[pos])
1045  while pos < nlines and not match:
1046  pos += 1
1047  match = h_count_re.search(outlines[pos])
1048  if match:
1049  summ, pos = parseHistosSummary(outlines, pos)
1050  summaries.update(summ)
1051  return summaries
1052 
1053 def PlatformIsNotSupported(self, context, result):
1054  platform = GetPlatform(self)
1055  unsupported = [ re.compile(x) for x in [ str(y).strip() for y in self.unsupported_platforms ] if x]
1056  for p_re in unsupported :
1057  if p_re.search(platform):
1058  result.SetOutcome(result.UNTESTED)
1059  result[result.CAUSE] = 'Platform not supported.'
1060  return True
1061  return False
1062 
1063 def GetPlatform(self):
1064  """
1065  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1066  """
1067  arch = "None"
1068  # check architecture name
1069  if "CMTCONFIG" in os.environ:
1070  arch = os.environ["CMTCONFIG"]
1071  elif "SCRAM_ARCH" in os.environ:
1072  arch = os.environ["SCRAM_ARCH"]
1073  return arch
1074 
1075 def isWinPlatform(self):
1076  """
1077  Return True if the current platform is Windows.
1078 
1079  This function was needed because of the change in the CMTCONFIG format,
1080  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1081  """
1082  platform = GetPlatform(self)
1083  return "winxp" in platform or platform.startswith("win")
1084 