GaudiTest.py
1 ########################################################################
2 # File: GaudiTest.py
3 # Author: Marco Clemencic CERN/PH-LBC
4 ########################################################################
5 __author__ = 'Marco Clemencic CERN/PH-LBC'
6 ########################################################################
7 # Imports
8 ########################################################################
9 import os
10 import sys
11 import re
12 import tempfile
13 import shutil
14 import string
15 import difflib
16 import time
17 import calendar
18 import codecs
19 
20 from subprocess import Popen, PIPE, STDOUT
21 
22 try:
23  from GaudiKernel import ROOT6WorkAroundEnabled
24 except ImportError:
25  def ROOT6WorkAroundEnabled(id=None):
26  # dummy implementation
27  return False
28 
29 # ensure the preferred locale
30 os.environ['LC_ALL'] = 'C'
31 
32 # Needed for the XML wrapper
33 try:
34  import xml.etree.cElementTree as ET
35 except ImportError:
36  import xml.etree.ElementTree as ET
37 
38 # redefinition of timedelta.total_seconds(), which is not present in Python 2.6
39 def total_seconds_replacement(timedelta) :
40  return timedelta.days*86400 + timedelta.seconds + timedelta.microseconds/1e6
41 
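# --- Illustrative sketch (not part of the original file): the helper above can be
# --- used in place of datetime.timedelta.total_seconds() on old Python versions.
def _example_total_seconds():
    from datetime import timedelta
    delta = timedelta(days=1, seconds=30)
    return total_seconds_replacement(delta)   # -> 86430 seconds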
42 
43 import qm
44 from qm.test.classes.command import ExecTestBase
45 from qm.test.result_stream import ResultStream
46 
47 ### Needed by the re-implementation of TimeoutExecutable
48 import qm.executable
49 import signal
50 # The classes in this module are implemented differently depending on
51 # the operating system in use.
52 if sys.platform == "win32":
53  import msvcrt
54  import pywintypes
55  from threading import *
56  import win32api
57  import win32con
58  import win32event
59  import win32file
60  import win32pipe
61  import win32process
62 else:
63  import cPickle
64  import fcntl
65  import select
66  import qm.sigmask
67 
68 ########################################################################
69 # Utility Classes
70 ########################################################################
71 class TemporaryEnvironment:
72  """
73  Class to change the environment temporarily.
74  """
75  def __init__(self, orig = os.environ, keep_same = False):
76  """
77  Create a temporary environment on top of the one specified
78  (it can be another TemporaryEnvironment instance).
79  """
80  #print "New environment"
81  self.old_values = {}
82  self.env = orig
83  self._keep_same = keep_same
84 
85  def __setitem__(self,key,value):
86  """
87  Set an environment variable recording the previous value.
88  """
89  if key not in self.old_values :
90  if key in self.env :
91  if not self._keep_same or self.env[key] != value:
92  self.old_values[key] = self.env[key]
93  else:
94  self.old_values[key] = None
95  self.env[key] = value
96 
97  def __getitem__(self,key):
98  """
99  Get an environment variable.
100  Needed to provide the same interface as os.environ.
101  """
102  return self.env[key]
103 
104  def __delitem__(self,key):
105  """
106  Unset an environment variable.
107  Needed to provide the same interface as os.environ.
108  """
109  if key not in self.env :
110  raise KeyError(key)
111  self.old_values[key] = self.env[key]
112  del self.env[key]
113 
114  def keys(self):
115  """
116  Return the list of defined environment variables.
117  Needed to provide the same interface as os.environ.
118  """
119  return self.env.keys()
120 
121  def items(self):
122  """
123  Return the list of (name,value) pairs for the defined environment variables.
124  Needed to provide the same interface as os.environ.
125  """
126  return self.env.items()
127 
128  def __contains__(self,key):
129  """
130  Operator 'in'.
131  Needed to provide the same interface as os.environ.
132  """
133  return key in self.env
134 
135  def restore(self):
136  """
137  Revert all the changes done to the original environment.
138  """
139  for key,value in self.old_values.items():
140  if value is None:
141  del self.env[key]
142  else:
143  self.env[key] = value
144  self.old_values = {}
145 
146  def __del__(self):
147  """
148  Revert the changes on destruction.
149  """
150  #print "Restoring the environment"
151  self.restore()
152 
153  def gen_script(self,shell_type):
154  """
155  Generate a shell script to reproduce the changes in the environment.
156  """
157  shells = [ 'csh', 'sh', 'bat' ]
158  if shell_type not in shells:
159  raise RuntimeError("Shell type '%s' unknown. Available: %s"%(shell_type,shells))
160  out = ""
161  for key,value in self.old_values.items():
162  if key not in self.env:
163  # unset variable
164  if shell_type == 'csh':
165  out += 'unsetenv %s\n'%key
166  elif shell_type == 'sh':
167  out += 'unset %s\n'%key
168  elif shell_type == 'bat':
169  out += 'set %s=\n'%key
170  else:
171  # set variable
172  if shell_type == 'csh':
173  out += 'setenv %s "%s"\n'%(key,self.env[key])
174  elif shell_type == 'sh':
175  out += 'export %s="%s"\n'%(key,self.env[key])
176  elif shell_type == 'bat':
177  out += 'set %s=%s\n'%(key,self.env[key])
178  return out
179 
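# --- Illustrative sketch (not part of the original file): typical use of the class
# --- above; the environment variable name is hypothetical.
def _example_temporary_environment():
    env = TemporaryEnvironment()
    env['GAUDI_TEST_DUMMY'] = '1'    # the previous value (if any) is recorded
    script = env.gen_script('sh')    # shell commands reproducing the change
    env.restore()                    # all recorded changes are reverted
    return script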
180 class TempDir:
181  """Small class for temporary directories.
182  When instantiated, it creates a temporary directory and the instance
183  behaves as the string containing the directory name.
184  When the instance goes out of scope, it removes all the content of
185  the temporary directory (automatic clean-up).
186  """
187  def __init__(self, keep = False, chdir = False):
188  self.name = tempfile.mkdtemp()
189  self._keep = keep
190  self._origdir = None
191  if chdir:
192  self._origdir = os.getcwd()
193  os.chdir(self.name)
194 
195  def __str__(self):
196  return self.name
197 
198  def __del__(self):
199  if self._origdir:
200  os.chdir(self._origdir)
201  if self.name and not self._keep:
202  shutil.rmtree(self.name)
203 
204  def __getattr__(self,attr):
205  return getattr(self.name,attr)
206 
207 class TempFile:
208  """Small class for temporary files.
209  When instantiated, it creates a temporary file and the instance
210  behaves as the string containing the file name.
211  When the instance goes out of scope, the temporary file is
212  removed (automatic clean-up).
213  """
214  def __init__(self, suffix='', prefix='tmp', dir=None, text=False, keep = False):
215  self.file = None
216  self.name = None
217  self._keep = keep
218 
219  self._fd, self.name = tempfile.mkstemp(suffix,prefix,dir,text)
220  self.file = os.fdopen(self._fd,"r+")
221 
222  def __str__(self):
223  return self.name
224 
225  def __del__(self):
226  if self.file:
227  self.file.close()
228  if self.name and not self._keep:
229  os.remove(self.name)
230 
231  def __getattr__(self,attr):
232  return getattr(self.file,attr)
233 
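# --- Illustrative sketch (not part of the original file): TempDir and TempFile
# --- instances behave like the path string and clean themselves up on destruction.
def _example_temporaries():
    tmpdir = TempDir()                    # new empty directory
    tmpfile = TempFile(suffix='.opts')    # open file object plus its path
    tmpfile.write('// options\n')
    tmpfile.flush()
    return str(tmpdir), str(tmpfile)      # paths, removed when the objects die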
234 class CMT:
235  """Small wrapper to call CMT.
236  """
237  def __init__(self,path=None):
238  if path is None:
239  path = os.getcwd()
240  self.path = path
241 
242  def _run_cmt(self,command,args):
243  # prepare command line
244  if type(args) is str:
245  args = [args]
246  cmd = "cmt %s"%command
247  for arg in args:
248  cmd += ' "%s"'%arg
249 
250  # go to the execution directory
251  olddir = os.getcwd()
252  os.chdir(self.path)
253  # run cmt
254  result = os.popen4(cmd)[1].read()
255  # return to the old directory
256  os.chdir(olddir)
257  return result
258 
259  def __getattr__(self,attr):
260  return lambda args=[]: self._run_cmt(attr, args)
261 
262  def runtime_env(self,env = None):
263  """Returns a dictionary containing the runtime environment produced by CMT.
264  If a dictionary is passed a modified instance of it is returned.
265  """
266  if env is None:
267  env = {}
268  for l in self.setup("-csh").splitlines():
269  l = l.strip()
270  if l.startswith("setenv"):
271  dummy,name,value = l.split(None,3)
272  env[name] = value.strip('"')
273  elif l.startswith("unsetenv"):
274  dummy,name = l.split(None,2)
275  if name in env:
276  del env[name]
277  return env
278  def show_macro(self,k):
279  r = self.show(["macro",k])
280  if r.find("CMT> Error: symbol not found") >= 0:
281  return None
282  else:
283  return self.show(["macro_value",k]).strip()
284 
285 
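# --- Illustrative sketch (not part of the original file): querying CMT from a
# --- package cmt directory; it requires a working CMT installation.
def _example_cmt():
    cmt = CMT('.')                    # hypothetical package 'cmt' directory
    env = cmt.runtime_env()           # dictionary with the runtime environment
    pkg = cmt.show_macro('package')   # value of the 'package' macro, or None
    return env, pkg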
286 ## Locates an executable in the executables path ($PATH) and returns the full
287 # path to it.
288 # If the executable cannot be found, None is returned
289 def which(executable):
290  """
291  Locates an executable in the executables path ($PATH) and returns the full
292  path to it. An application is looked for with or without the '.exe' suffix.
293  If the executable cannot be found, None is returned
294  """
295  if os.path.isabs(executable):
296  if not os.path.exists(executable):
297  if executable.endswith('.exe'):
298  if os.path.exists(executable[:-4]):
299  return executable[:-4]
300  return executable
301  for d in os.environ.get("PATH", "").split(os.pathsep):
302  fullpath = os.path.join(d, executable)
303  if os.path.exists(fullpath):
304  return fullpath
305  if executable.endswith('.exe'):
306  return which(executable[:-4])
307  return None
308 
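# --- Illustrative sketch (not part of the original file): resolving a program
# --- name through $PATH; 'ls' is just an example of an existing executable.
def _example_which():
    return which('ls')   # full path on Unix, None if it cannot be found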
309 def rationalizepath(p):
310  np = os.path.normpath(os.path.expandvars(p))
311  if os.path.exists(np):
312  p = os.path.realpath(np)
313  return p
314 
315 # XML Escaping character
316 import re
317 
318 # xml 1.0 valid characters:
319 # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
320 # so to invert that, not in Char ::
321 # x0 - x8 | xB | xC | xE - x1F
322 # (most control characters, though TAB, CR, LF allowed)
323 # | #xD800 - #xDFFF
324 # (unicode surrogate characters)
325 # | #xFFFE | #xFFFF |
326 # (unicode end-of-plane non-characters)
327 # >= 110000
328 # that would be beyond unicode!!!
329 _illegal_xml_chars_RE = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
330 
331 def hexreplace( match ):
332  "Return the hex string "
333  return "".join(map(hexConvert,match.group()))
334 
335 def hexConvert(char):
336  return hex(ord(char))
337 def convert_xml_illegal_chars(val):
338  return _illegal_xml_chars_RE.sub(hexreplace, val)
339 
340 def escape_xml_illegal_chars(val, replacement='?'):
341  """Filter out characters that are illegal in XML.
342  Looks for any character in val that is not allowed in XML
343  and replaces it with replacement ('?' by default).
344 
345  """
346  return _illegal_xml_chars_RE.sub(replacement, val)
347 
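# --- Illustrative sketch (not part of the original file): removing control
# --- characters that would make the XML summary invalid.
def _example_escape_xml():
    dirty = u'loading\x00done\x01'
    return escape_xml_illegal_chars(dirty)   # u'loading?done?'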
348 ########################################################################
349 # Output Validation Classes
350 ########################################################################
351 class BasicOutputValidator:
352  """Basic implementation of an output validator for Gaudi tests.
353  This implementation is based on the standard (LCG) validation functions
354  used in QMTest.
355  """
356  def __init__(self,ref,cause,result_key):
357  self.reference = ref
358  self.cause = cause
359  self.result_key = result_key
360 
361  def __call__(self, out, result):
362  """Validate the output of the program.
363 
364  'out' -- A string containing the data to validate (the standard
365  output or the standard error of the test).
369 
370  'result' -- A 'Result' object. It may be used to annotate
371  the outcome according to the content of stderr.
372 
373  returns -- A list of strings giving causes of failure."""
374 
375  causes = []
376  # Check to see if the output matches.
377  if not self.__CompareText(out, self.reference):
378  causes.append(self.cause)
379  result[self.result_key] = result.Quote(self.reference)
380 
381  return causes
382 
383  def __CompareText(self, s1, s2):
384  """Compare 's1' and 's2', ignoring line endings.
385 
386  's1' -- A string.
387 
388  's2' -- A string.
389 
390  returns -- True if 's1' and 's2' are the same, ignoring
391  differences in line endings."""
392 
393  # The "splitlines" method works independently of the line ending
394  # convention in use.
395  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
396  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
397  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
398  keep_line = lambda l: not to_ignore.match(l)
399  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
400  else:
401  return s1.splitlines() == s2.splitlines()
402 
403 class FilePreprocessor:
404  """ Base class for a callable that takes a file and returns a modified
405  version of it."""
406  def __processLine__(self, line):
407  return line
408  def __call__(self, input):
409  if hasattr(input,"__iter__"):
410  lines = input
411  mergeback = False
412  else:
413  lines = input.splitlines()
414  mergeback = True
415  output = []
416  for l in lines:
417  l = self.__processLine__(l)
418  if l: output.append(l)
419  if mergeback: output = '\n'.join(output)
420  return output
421  def __add__(self, rhs):
422  return FilePreprocessorSequence([self,rhs])
423 
424 class FilePreprocessorSequence(FilePreprocessor):
425  def __init__(self, members = []):
426  self.members = members
427  def __add__(self, rhs):
428  return FilePreprocessorSequence(self.members + [rhs])
429  def __call__(self, input):
430  output = input
431  for pp in self.members:
432  output = pp(output)
433  return output
434 
435 class LineSkipper(FilePreprocessor):
436  def __init__(self, strings = [], regexps = []):
437  import re
438  self.strings = strings
439  self.regexps = map(re.compile,regexps)
440 
441  def __processLine__(self, line):
442  for s in self.strings:
443  if line.find(s) >= 0: return None
444  for r in self.regexps:
445  if r.search(line): return None
446  return line
447 
448 class BlockSkipper(FilePreprocessor):
449  def __init__(self, start, end):
450  self.start = start
451  self.end = end
452  self._skipping = False
453 
454  def __processLine__(self, line):
455  if self.start in line:
456  self._skipping = True
457  return None
458  elif self.end in line:
459  self._skipping = False
460  elif self._skipping:
461  return None
462  return line
463 
464 class RegexpReplacer(FilePreprocessor):
465  def __init__(self, orig, repl = "", when = None):
466  if when:
467  when = re.compile(when)
468  self._operations = [ (when, re.compile(orig), repl) ]
469  def __add__(self,rhs):
470  if isinstance(rhs, RegexpReplacer):
471  res = RegexpReplacer("","",None)
472  res._operations = self._operations + rhs._operations
473  else:
474  res = FilePreprocessor.__add__(self, rhs)
475  return res
476  def __processLine__(self, line):
477  for w,o,r in self._operations:
478  if w is None or w.search(line):
479  line = o.sub(r, line)
480  return line
481 
482 # Common preprocessors
483 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
484 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9] *(CES?T)?",
485  "00:00:00 1970-01-01")
486 normalizeEOL = FilePreprocessor()
487 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
488 
489 skipEmptyLines = FilePreprocessor()
490 # FIXME: that's ugly
491 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
492 
493 ## Special preprocessor sorting the list of strings (whitespace separated)
494 # that follow a signature on a single line
495 class LineSorter(FilePreprocessor):
496  def __init__(self, signature):
497  self.signature = signature
498  self.siglen = len(signature)
499  def __processLine__(self, line):
500  pos = line.find(self.signature)
501  if pos >=0:
502  lst = line[(pos+self.siglen):].split()
503  lst.sort()
504  line = line[:(pos+self.siglen)]
505  line += " ".join(lst)
506  return line
507 
508 # Preprocessors for GaudiExamples
509 normalizeExamples = maskPointers + normalizeDate
510 for w,o,r in [
511  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
512  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
513  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
514  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
515  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
516  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
517  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
518  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
519  # Absorb a change in ServiceLocatorHelper
520  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
521  # Remove the leading 0 in Windows' exponential format
522  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
523  # Output line changed in Gaudi v24
524  (None, r'Service reference count check:', r'Looping over all active services...'),
525  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
526  normalizeExamples += RegexpReplacer(o,r,w)
527 
528 lineSkipper = LineSkipper(["//GP:",
529  "JobOptionsSvc INFO # ",
530  "JobOptionsSvc WARNING # ",
531  "Time User",
532  "Welcome to",
533  "This machine has a speed",
534  "TIME:",
535  "running on",
536  "ToolSvc.Sequenc... INFO",
537  "DataListenerSvc INFO XML written to file:",
538  "[INFO]","[WARNING]",
539  "DEBUG No writable file catalog found which contains FID:",
540  "0 local", # hack for ErrorLogExample
541  "DEBUG Service base class initialized successfully", # changed between v20 and v21
542  "DEBUG Incident timing:", # introduced with patch #3487
543  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
544  # The signal handler complains about SIGXCPU not defined on some platforms
545  'SIGXCPU',
546  ],regexps = [
547  r"^JobOptionsSvc INFO *$",
548  r"^#", # Ignore python comments
549  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
550  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
551  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
552  r"File '.*.xml' does not exist",
553  r"INFO Refer to dataset .* by its file ID:",
554  r"INFO Referring to dataset .* by its file ID:",
555  r"INFO Disconnect from dataset",
556  r"INFO Disconnected from dataset",
557  r"INFO Disconnected data IO:",
558  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
559  # I want to ignore the header of the unchecked StatusCode report
560  r"^StatusCodeSvc.*listing all unchecked return codes:",
561  r"^StatusCodeSvc\s*INFO\s*$",
562  r"Num\s*\|\s*Function\s*\|\s*Source Library",
563  r"^[-+]*\s*$",
564  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
565  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
566  # Hide unchecked StatusCodes from dictionaries
567  r"^ +[0-9]+ \|.*ROOT",
568  r"^ +[0-9]+ \|.*\|.*Dict",
569  # Remove ROOT TTree summary table, which changes from one version to the other
570  r"^\*.*\*$",
571  # Remove Histos Summaries
572  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
573  r"^ \|",
574  r"^ ID=",
575  ] )
576 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
577  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
578  lineSkipper += LineSkipper(regexps = [
579  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
580  ])
581 
582 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
583  normalizeEOL + LineSorter("Services to release : "))
584 
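# --- Illustrative sketch (not part of the original file): preprocessors are
# --- callables that can be chained with '+' and applied to a string (or a list of
# --- lines) before the comparison with the reference; the sample text is made up.
def _example_preprocessor_chain():
    preproc = maskPointers + LineSkipper(['DEBUG']) + skipEmptyLines
    text = 'ptr at 0x00001234\nDEBUG noise\n\nkept line'
    return preproc(text)   # 'ptr at 0x########\nkept line'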
585 class ReferenceFileValidator:
586  def __init__(self, reffile, cause, result_key, preproc = normalizeExamples):
587  self.reffile = os.path.expandvars(reffile)
588  self.cause = cause
589  self.result_key = result_key
590  self.preproc = preproc
591  def __call__(self, stdout, result):
592  causes = []
593  if os.path.isfile(self.reffile):
594  orig = open(self.reffile).xreadlines()
595  if self.preproc:
596  orig = self.preproc(orig)
597  else:
598  orig = []
599 
600  new = stdout.splitlines()
601  if self.preproc:
602  new = self.preproc(new)
603  #open(self.reffile + ".test","w").writelines(new)
604  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
605  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
606  #filterdiffs = [x.strip() for x in diffs]
607  if filterdiffs:
608  result[self.result_key] = result.Quote("\n".join(filterdiffs))
609  result[self.result_key] += result.Quote("""
610 Legend:
611  -) reference file
612  +) standard output of the test""")
613  causes.append(self.cause)
614 
615  return causes
616 
617 ########################################################################
618 # Useful validation functions
619 ########################################################################
620 def findReferenceBlock(reference, stdout, result, causes, signature_offset=0, signature=None,
621  id = None):
622  """
623  Given a block of text, tries to find it in the output.
624  The block has to be identified by a signature line. By default, the first
625  line is used as signature, or the line pointed to by signature_offset. If
626  signature_offset points outside the block, a signature line can be passed as
627  signature argument. Note: if 'signature' is None (the default), a negative
628  signature_offset is interpreted as index in a list (e.g. -1 means the last
629  line), otherwise it is interpreted as the number of lines before the
630  first line of the block at which the signature must appear.
631  The parameter 'id' allows one to distinguish between different calls to this
632  function in the same validation code.
633  """
634  # split reference file, sanitize EOLs and remove empty lines
635  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
636  if not reflines:
637  raise RuntimeError("Empty (or null) reference")
638  # the same on standard output
639  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
640 
641  res_field = "GaudiTest.RefBlock"
642  if id:
643  res_field += "_%s" % id
644 
645  if signature is None:
646  if signature_offset < 0:
647  signature_offset = len(reflines)+signature_offset
648  signature = reflines[signature_offset]
649  # find the reference block in the output file
650  try:
651  pos = outlines.index(signature)
652  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
653  if reflines != outlines:
654  msg = "standard output"
655  # I do not want 2 messages in causes if the function is called twice
656  if not msg in causes:
657  causes.append(msg)
658  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
659  except ValueError:
660  causes.append("missing signature")
661  result[res_field + ".signature"] = result.Quote(signature)
662  if len(reflines) > 1 or signature != reflines[0]:
663  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
664 
665  return causes
666 
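# --- Illustrative sketch (not part of the original file): inside the "validator"
# --- field of a test description 'stdout', 'result' and 'causes' are injected
# --- automatically (see GaudiExeTest.ValidateOutput), so a call reduces to:
#
#   findReferenceBlock("""
#       ApplicationMgr       INFO Application Manager Initialized successfully
#   """, id='init')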
667 def countErrorLines(expected = {'ERROR':0, 'FATAL':0}, **kwargs):
668  """
669  Count the number of messages with required severity (by default ERROR and FATAL)
670  and check if their numbers match the expected ones (0 by default).
671  The dictionary "expected" can be used to tune the number of errors and fatals
672  allowed, or to limit the number of expected warnings etc.
673  """
674  stdout = kwargs["stdout"]
675  result = kwargs["result"]
676  causes = kwargs["causes"]
677 
678  # prepare the dictionary to record the extracted lines
679  errors = {}
680  for sev in expected:
681  errors[sev] = []
682 
683  outlines = stdout.splitlines()
684  from math import log10
685  fmt = "%%%dd - %%s" % (int(log10(len(outlines))+1))
686 
687  linecount = 0
688  for l in outlines:
689  linecount += 1
690  words = l.split()
691  if len(words) >= 2 and words[1] in errors:
692  errors[words[1]].append(fmt%(linecount,l.rstrip()))
693 
694  for e in errors:
695  if len(errors[e]) != expected[e]:
696  causes.append('%s(%d)'%(e,len(errors[e])))
697  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
698  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
699 
700  return causes
701 
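# --- Illustrative sketch (not part of the original file): in the "validator" field
# --- the wrapper injects 'stdout', 'result' and 'causes', so one only tunes the
# --- expected counts, e.g. to tolerate a single ERROR line:
#
#   countErrorLines(expected={'ERROR': 1, 'FATAL': 0})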
702 
703 def _parseTTreeSummary(lines, pos):
704  """
705  Parse the TTree summary table in lines, starting from pos.
706  Returns a tuple with the dictionary of the digested information and the
707  position of the first line after the summary.
708  """
709  result = {}
710  i = pos + 1 # first line is a sequence of '*'
711  count = len(lines)
712 
713  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
714  def parseblock(ll):
715  r = {}
716  cols = splitcols(ll[0])
717  r["Name"], r["Title"] = cols[1:]
718 
719  cols = splitcols(ll[1])
720  r["Entries"] = int(cols[1])
721 
722  sizes = cols[2].split()
723  r["Total size"] = int(sizes[2])
724  if sizes[-1] == "memory":
725  r["File size"] = 0
726  else:
727  r["File size"] = int(sizes[-1])
728 
729  cols = splitcols(ll[2])
730  sizes = cols[2].split()
731  if cols[0] == "Baskets":
732  r["Baskets"] = int(cols[1])
733  r["Basket size"] = int(sizes[2])
734  r["Compression"] = float(sizes[-1])
735  return r
736 
737  if i < (count - 3) and lines[i].startswith("*Tree"):
738  result = parseblock(lines[i:i+3])
739  result["Branches"] = {}
740  i += 4
741  while i < (count - 3) and lines[i].startswith("*Br"):
742  if i < (count - 2) and lines[i].startswith("*Branch "):
743  # skip branch header
744  i += 3
745  continue
746  branch = parseblock(lines[i:i+3])
747  result["Branches"][branch["Name"]] = branch
748  i += 4
749 
750  return (result, i)
751 
752 def findTTreeSummaries(stdout):
753  """
754  Scan stdout to find ROOT TTree summaries and digest them.
755  """
756  stars = re.compile(r"^\*+$")
757  outlines = stdout.splitlines()
758  nlines = len(outlines)
759  trees = {}
760 
761  i = 0
762  while i < nlines: #loop over the output
763  # look for the beginning of the next TTree summary table
764  while i < nlines and not stars.match(outlines[i]):
765  i += 1
766  if i < nlines:
767  tree, i = _parseTTreeSummary(outlines, i)
768  if tree:
769  trees[tree["Name"]] = tree
770 
771  return trees
772 
773 def cmpTreesDicts(reference, to_check, ignore = None):
774  """
775  Check that all the keys in reference are in to_check too, with the same value.
776  If the value is a dict, the function is called recursively. to_check can
777  contain more keys than reference; the extra keys are not tested.
778  The function returns at the first difference found.
779  """
780  fail_keys = []
781  # filter the keys in the reference dictionary
782  if ignore:
783  ignore_re = re.compile(ignore)
784  keys = [ key for key in reference if not ignore_re.match(key) ]
785  else:
786  keys = reference.keys()
787  # loop over the keys (not ignored) in the reference dictionary
788  for k in keys:
789  if k in to_check: # the key must be in the dictionary to_check
790  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
791  # if both reference and to_check values are dictionaries, recurse
792  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
793  else:
794  # compare the two values
795  failed = to_check[k] != reference[k]
796  else: # handle missing keys in the dictionary to check (i.e. failure)
797  to_check[k] = None
798  failed = True
799  if failed:
800  fail_keys.insert(0, k)
801  break # exit from the loop at the first failure
802  return fail_keys # return the list of keys bringing to the different values
803 
804 def getCmpFailingValues(reference, to_check, fail_path):
805  c = to_check
806  r = reference
807  for k in fail_path:
808  c = c.get(k,None)
809  r = r.get(k,None)
810  if c is None or r is None:
811  break # one of the dictionaries is not deep enough
812  return (fail_path, r, c)
813 
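# --- Illustrative sketch (not part of the original file): comparing two nested
# --- summary dictionaries and extracting the first mismatch.
def _example_cmp_trees():
    reference = {'MyTree': {'Entries': 100}}
    observed = {'MyTree': {'Entries': 99}}
    fail_path = cmpTreesDicts(reference, observed)          # ['MyTree', 'Entries']
    return getCmpFailingValues(reference, observed, fail_path)
    # -> (['MyTree', 'Entries'], 100, 99)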
814 # signature of the print-out of the histograms
815 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
816 
817 def parseHistosSummary(lines, pos):
818  """
819  Extract the histogram info from the lines starting at pos.
820  Returns the position of the first line after the summary block.
821  """
822  global h_count_re
823  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
824  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
825 
826  nlines = len(lines)
827 
828  # decode header
829  m = h_count_re.search(lines[pos])
830  name = m.group(1).strip()
831  total = int(m.group(2))
832  header = {}
833  for k, v in [ x.split("=") for x in m.group(3).split() ]:
834  header[k] = int(v)
835  pos += 1
836  header["Total"] = total
837 
838  summ = {}
839  while pos < nlines:
840  m = h_table_head.search(lines[pos])
841  if m:
842  t, d = m.groups(1) # type and directory
843  t = t.replace(" profile", "Prof")
844  pos += 1
845  if pos < nlines:
846  l = lines[pos]
847  else:
848  l = ""
849  cont = {}
850  if l.startswith(" | ID"):
851  # table format
852  titles = [ x.strip() for x in l.split("|")][1:]
853  pos += 1
854  while pos < nlines and lines[pos].startswith(" |"):
855  l = lines[pos]
856  values = [ x.strip() for x in l.split("|")][1:]
857  hcont = {}
858  for i in range(len(titles)):
859  hcont[titles[i]] = values[i]
860  cont[hcont["ID"]] = hcont
861  pos += 1
862  elif l.startswith(" ID="):
863  while pos < nlines and lines[pos].startswith(" ID="):
864  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
865  cont[values[0]] = values
866  pos += 1
867  else: # not interpreted
868  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
869  if not d in summ:
870  summ[d] = {}
871  summ[d][t] = cont
872  summ[d]["header"] = header
873  else:
874  break
875  if not summ:
876  # If the full table is not present, we use only the header
877  summ[name] = {"header": header}
878  return summ, pos
879 
880 def findHistosSummaries(stdout):
881  """
882  Scan stdout to find histogram summaries and digest them.
883  """
884  outlines = stdout.splitlines()
885  nlines = len(outlines) - 1
886  summaries = {}
887  global h_count_re
888 
889  pos = 0
890  while pos < nlines:
891  summ = {}
892  # find first line of block:
893  match = h_count_re.search(outlines[pos])
894  while pos < nlines and not match:
895  pos += 1
896  match = h_count_re.search(outlines[pos])
897  if match:
898  summ, pos = parseHistosSummary(outlines, pos)
899  summaries.update(summ)
900  return summaries
901 
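# --- Illustrative sketch (not part of the original file): digesting the histogram
# --- count line printed by a Gaudi job; 'MyAlg' is a hypothetical algorithm name.
def _example_find_histos():
    out = ('MyAlg                SUCCESS Booked 1 Histogram(s) : 1D=1\n'
           'ApplicationMgr       INFO Application Manager Finalized successfully\n')
    return findHistosSummaries(out)
    # -> {'MyAlg': {'header': {'1D': 1, 'Total': 1}}}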
902 class GaudiFilterExecutable(qm.executable.Filter):
903  def __init__(self, input, timeout = -1):
904  """Create a new 'Filter'.
905 
906  'input' -- The string containing the input to provide to the
907  child process.
908 
909  'timeout' -- As for 'TimeoutExecutable.__init__'."""
910 
911  super(GaudiFilterExecutable, self).__init__(input, timeout)
912  self.__input = input
913  self.__timeout = timeout
914  self.stack_trace_file = None
915  # Temporary file to pass the stack trace from one process to the other
916  # The file must be closed and reopened when needed to avoid conflicts
917  # between the processes
918  tmpf = tempfile.mkstemp()
919  os.close(tmpf[0])
920  self.stack_trace_file = tmpf[1] # remember only the name
921 
922  def __UseSeparateProcessGroupForChild(self):
923  """Copied from TimeoutExecutable to allow the re-implementation of
924  _HandleChild.
925  """
926  if sys.platform == "win32":
927  # In Windows 2000 (or later), we should use "jobs" by
928  # analogy with UNIX process groups. However, that
929  # functionality is not (yet) provided by the Python Win32
930  # extensions.
931  return 0
932 
933  return self.__timeout >= 0 or self.__timeout == -2
934  ##
935  # Needs to replace the ones from RedirectedExecutable and TimeoutExecutable
936  def _HandleChild(self):
937  """Code copied from both FilterExecutable and TimeoutExecutable.
938  """
939  # Close the pipe ends that we do not need.
940  if self._stdin_pipe:
941  self._ClosePipeEnd(self._stdin_pipe[0])
942  if self._stdout_pipe:
943  self._ClosePipeEnd(self._stdout_pipe[1])
944  if self._stderr_pipe:
945  self._ClosePipeEnd(self._stderr_pipe[1])
946 
947  # The pipes created by 'RedirectedExecutable' must be closed
948  # before the monitor process (created by 'TimeoutExecutable')
949  # is created. Otherwise, if the child process dies, 'select'
950  in the parent will not return while the monitor process
951  still has one of the file descriptors open.
952 
953  super(qm.executable.TimeoutExecutable, self)._HandleChild()
954 
955  if self.__UseSeparateProcessGroupForChild():
956  # Put the child into its own process group. This step is
957  # performed in both the parent and the child; therefore both
958  # processes can safely assume that the creation of the process
959  # group has taken place.
960  child_pid = self._GetChildPID()
961  try:
962  os.setpgid(child_pid, child_pid)
963  except:
964  # The call to setpgid may fail if the child has exited,
965  # or has already called 'exec'. In that case, we are
966  # guaranteed that the child has already put itself in the
967  # desired process group.
968  pass
969  # Create the monitoring process.
970  #
971  # If the monitoring process is in parent's process group and
972  # kills the child after waitpid has returned in the parent, we
973  # may end up trying to kill a process group other than the one
974  # that we intend to kill. Therefore, we put the monitoring
975  # process in the same process group as the child; that ensures
976  # that the process group will persist until the monitoring
977  # process kills it.
978  self.__monitor_pid = os.fork()
979  if self.__monitor_pid != 0:
980  # Make sure that the monitoring process is placed into the
981  # child's process group before the parent process calls
982  'waitpid'. In this way, we are guaranteed that the monitor
983  is in the same process group as the child.
984  os.setpgid(self.__monitor_pid, child_pid)
985  else:
986  # Put the monitoring process into the child's process
987  # group. We know the process group still exists at
988  # this point because either (a) we are in the process
989  # group, or (b) the parent has not yet called waitpid.
990  os.setpgid(0, child_pid)
991 
992  # Close all open file descriptors. They are not needed
993  # in the monitor process. Furthermore, when the parent
994  # closes the write end of the stdin pipe to the child,
995  # we do not want the pipe to remain open; leaving the
996  # pipe open in the monitor process might cause the child
997  # to block waiting for additional input.
998  try:
999  max_fds = os.sysconf("SC_OPEN_MAX")
1000  except:
1001  max_fds = 256
1002  for fd in xrange(max_fds):
1003  try:
1004  os.close(fd)
1005  except:
1006  pass
1007  try:
1008  if self.__timeout >= 0:
1009  # Give the child time to run.
1010  time.sleep (self.__timeout)
1011  #######################################################
1012  ### This is the interesting part: dump the stack trace to a file
1013  if sys.platform == "linux2": # we should have /proc and gdb
1014  cmd = ["gdb",
1015  os.path.join("/proc", str(child_pid), "exe"),
1016  str(child_pid),
1017  "-batch", "-n", "-x",
1018  "'%s'" % os.path.join(os.path.dirname(__file__), "stack-trace.gdb")]
1019  # FIXME: I wanted to use subprocess.Popen, but it doesn't want to work
1020  # in this context.
1021  o = os.popen(" ".join(cmd)).read()
1022  open(self.stack_trace_file,"w").write(o)
1023  #######################################################
1024 
1025  # Kill all processes in the child process group.
1026  os.kill(0, signal.SIGKILL)
1027  else:
1028  # This call to select will never terminate.
1029  select.select ([], [], [])
1030  finally:
1031  # Exit. This code is in a finally clause so that
1032  # we are guaranteed to get here no matter what.
1033  os._exit(0)
1034  elif self.__timeout >= 0 and sys.platform == "win32":
1035  # Create a monitoring thread.
1036  self.__monitor_thread = Thread(target = self.__Monitor)
1037  self.__monitor_thread.start()
1038 
1039  if sys.platform == "win32":
1040 
1041  def __Monitor(self):
1042  """Code copied from FilterExecutable.
1043  Kill the child if the timeout expires.
1044 
1045  This function is run in the monitoring thread."""
1046 
1047  # The timeout may be expressed as a floating-point value
1048  # on UNIX, but it must be an integer number of
1049  # milliseconds when passed to WaitForSingleObject.
1050  timeout = int(self.__timeout * 1000)
1051  # Wait for the child process to terminate or for the
1052  # timer to expire.
1053  result = win32event.WaitForSingleObject(self._GetChildPID(),
1054  timeout)
1055  # If the timeout occurred, kill the child process.
1056  if result == win32con.WAIT_TIMEOUT:
1057  self.Kill()
1058 
1059 ########################################################################
1060 # Test Classes
1061 ########################################################################
1062 class GaudiExeTest(ExecTestBase):
1063  """Standard Gaudi test.
1064  """
1065  arguments = [
1066  qm.fields.TextField(
1067  name="program",
1068  title="Program",
1069  not_empty_text=1,
1070  description="""The path to the program.
1071 
1072  This field indicates the path to the program. If it is not
1073  an absolute path, the value of the 'PATH' environment
1074  variable will be used to search for the program.
1075  If not specified, $GAUDIEXE or Gaudi.exe are used.
1076  """
1077  ),
1078  qm.fields.SetField(qm.fields.TextField(
1079  name="args",
1080  title="Argument List",
1081  description="""The command-line arguments.
1082 
1083  If this field is left blank, the program is run without any
1084  arguments.
1085 
1086  Use this field to specify the option files.
1087 
1088  An implicit 0th argument (the path to the program) is added
1089  automatically."""
1090  )),
1091  qm.fields.TextField(
1092  name="options",
1093  title="Options",
1094  description="""Options to be passed to the application.
1095 
1096  This field allows one to pass a list of options to the main program
1097  without the need of a separate option file.
1098 
1099  The content of the field is written to a temporary file whose name
1100  is passed to the application as the last argument (appended to the
1101  field "Argument List").
1102  """,
1103  verbatim="true",
1104  multiline="true",
1105  default_value=""
1106  ),
1107  qm.fields.TextField(
1108  name="workdir",
1109  title="Working Directory",
1110  description="""Path to the working directory.
1111 
1112  If this field is left blank, the program will be run from the qmtest
1113  directory, otherwise from the directory specified.""",
1114  default_value=""
1115  ),
1116  qm.fields.TextField(
1117  name="reference",
1118  title="Reference Output",
1119  description="""Path to the file containing the reference output.
1120 
1121  If this field is left blank, any standard output will be considered
1122  valid.
1123 
1124  If the reference file is specified, any output on standard error is
1125  ignored."""
1126  ),
1127  qm.fields.TextField(
1128  name="error_reference",
1129  title="Reference for standard error",
1130  description="""Path to the file containing the reference for the standard error.
1131 
1132  If this field is left blank, any standard error output will be considered
1133  valid.
1134 
1135  If the reference file is specified, the standard error output is
1136  compared against its content."""
1137  ),
1138  qm.fields.SetField(qm.fields.TextField(
1139  name = "unsupported_platforms",
1140  title = "Unsupported Platforms",
1141  description = """Platforms on which the test must not be run.
1142 
1143  List of regular expressions identifying the platforms on which the
1144  test is not run and the result is set to UNTESTED."""
1145  )),
1146 
1147  qm.fields.TextField(
1148  name = "validator",
1149  title = "Validator",
1150  description = """Function to validate the output of the test.
1151 
1152  If defined, the function is used to validate the products of the
1153  test.
1154  The function is called passing as arguments:
1155  self: the test class instance
1156  stdout: the standard output of the executed test
1157  stderr: the standard error of the executed test
1158  result: the Result objects to fill with messages
1159  The function must return a list of causes for the failure.
1160  If specified, overrides standard output, standard error and
1161  reference files.
1162  """,
1163  verbatim="true",
1164  multiline="true",
1165  default_value=""
1166  ),
1167 
1168  qm.fields.BooleanField(
1169  name = "use_temp_dir",
1170  title = "Use temporary directory",
1171  description = """Use temporary directory.
1172 
1173  If set to true, use a temporary directory as working directory.
1174  """,
1175  default_value="false"
1176  ),
1177 
1178  qm.fields.IntegerField(
1179  name = "signal",
1180  title = "Expected signal",
1181  description = """Expect termination by signal.""",
1182  default_value=None
1183  ),
1184  ]
1185 
1186  def PlatformIsNotSupported(self, context, result):
1187  platform = self.GetPlatform()
1188  unsupported = [ re.compile(x)
1189  for x in [ str(y).strip()
1190  for y in self.unsupported_platforms ]
1191  if x
1192  ]
1193  for p_re in unsupported:
1194  if p_re.search(platform):
1195  result.SetOutcome(result.UNTESTED)
1196  result[result.CAUSE] = 'Platform not supported.'
1197  return True
1198  return False
1199 
1200  def GetPlatform(self):
1201  """
1202  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1203  """
1204  arch = "None"
1205  # check architecture name
1206  if "CMTCONFIG" in os.environ:
1207  arch = os.environ["CMTCONFIG"]
1208  elif "SCRAM_ARCH" in os.environ:
1209  arch = os.environ["SCRAM_ARCH"]
1210  return arch
1211 
1212  def isWinPlatform(self):
1213  """
1214  Return True if the current platform is Windows.
1215 
1216  This function was needed because of the change in the CMTCONFIG format,
1217  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1218  """
1219  platform = self.GetPlatform()
1220  return "winxp" in platform or platform.startswith("win")
1221 
1222  def _expandReferenceFileName(self, reffile):
1223  # if no file is passed, do nothing
1224  if not reffile:
1225  return ""
1226 
1227  # function to split an extension into its constituent parts
1228  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
1229 
1230  reference = os.path.normpath(os.path.expandvars(reffile))
1231  # old-style platform-specific reference name
1232  spec_ref = reference[:-3] + self.GetPlatform()[0:3] + reference[-3:]
1233  if os.path.isfile(spec_ref):
1234  reference = spec_ref
1235  else: # look for new-style platform specific reference files:
1236  # get all the files whose name start with the reference filename
1237  dirname, basename = os.path.split(reference)
1238  if not dirname: dirname = '.'
1239  head = basename + "."
1240  head_len = len(head)
1241  platform = platformSplit(self.GetPlatform())
1242  candidates = []
1243  for f in os.listdir(dirname):
1244  if f.startswith(head):
1245  req_plat = platformSplit(f[head_len:])
1246  if platform.issuperset(req_plat):
1247  candidates.append( (len(req_plat), f) )
1248  if candidates: # take the one with the highest number of matching platform elements
1249  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
1250  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
1251  candidates.sort()
1252  reference = os.path.join(dirname, candidates[-1][1])
1253  return reference
1254 
1255  def CheckTTreesSummaries(self, stdout, result, causes,
1256  trees_dict = None,
1257  ignore = r"Basket|.*size|Compression"):
1258  """
1259  Compare the TTree summaries in stdout with the ones in trees_dict or in
1260  the reference file. By default ignore the size, compression and basket
1261  fields.
1262  The presence of TTree summaries when none is expected is not a failure.
1263  """
1264  if trees_dict is None:
1265  reference = self._expandReferenceFileName(self.reference)
1266  # call the validator if the file exists
1267  if reference and os.path.isfile(reference):
1268  trees_dict = findTTreeSummaries(open(reference).read())
1269  else:
1270  trees_dict = {}
1271 
1272  from pprint import PrettyPrinter
1273  pp = PrettyPrinter()
1274  if trees_dict:
1275  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
1276  if ignore:
1277  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
1278 
1279  trees = findTTreeSummaries(stdout)
1280  failed = cmpTreesDicts(trees_dict, trees, ignore)
1281  if failed:
1282  causes.append("trees summaries")
1283  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
1284  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
1285  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
1286 
1287  return causes
1288 
1289  def CheckHistosSummaries(self, stdout, result, causes,
1290  dict = None,
1291  ignore = None):
1292  """
1293  Compare the histogram summaries in stdout with the ones in 'dict' or in
1294  the reference file. By default nothing is ignored (ignore is None).
1295  The presence of histogram summaries when none is expected is not a
1296  failure.
1297  """
1298  if dict is None:
1299  reference = self._expandReferenceFileName(self.reference)
1300  # call the validator if the file exists
1301  if reference and os.path.isfile(reference):
1302  dict = findHistosSummaries(open(reference).read())
1303  else:
1304  dict = {}
1305 
1306  from pprint import PrettyPrinter
1307  pp = PrettyPrinter()
1308  if dict:
1309  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
1310  if ignore:
1311  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
1312 
1313  histos = findHistosSummaries(stdout)
1314  failed = cmpTreesDicts(dict, histos, ignore)
1315  if failed:
1316  causes.append("histos summaries")
1317  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
1318  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
1319  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
1320 
1321  return causes
1322 
1323  def ValidateWithReference(self, stdout, stderr, result, causes, preproc = None):
1324  """
1325  Default validation action: compare standard output and error to the
1326  reference files.
1327  """
1328  # set the default output preprocessor
1329  if preproc is None:
1330  preproc = normalizeExamples
1331  # check standard output
1332  reference = self._expandReferenceFileName(self.reference)
1333  # call the validator if the file exists
1334  if reference and os.path.isfile(reference):
1335  result["GaudiTest.output_reference"] = reference
1336  causes += ReferenceFileValidator(reference,
1337  "standard output",
1338  "GaudiTest.output_diff",
1339  preproc = preproc)(stdout, result)
1340 
1341  # Compare TTree summaries
1342  causes = self.CheckTTreesSummaries(stdout, result, causes)
1343  causes = self.CheckHistosSummaries(stdout, result, causes)
1344 
1345  if causes: # Write a new reference file for stdout
1346  try:
1347  newref = open(reference + ".new","w")
1348  # sanitize newlines
1349  for l in stdout.splitlines():
1350  newref.write(l.rstrip() + '\n')
1351  del newref # flush and close
1352  except IOError:
1353  # Ignore IO errors when trying to update reference files
1354  # because we may be in a read-only filesystem
1355  pass
1356 
1357  # check standard error
1358  reference = self._expandReferenceFileName(self.error_reference)
1359  # call the validator if we have a file to use
1360  if reference and os.path.isfile(reference):
1361  result["GaudiTest.error_reference"] = reference
1362  newcauses = ReferenceFileValidator(reference,
1363  "standard error",
1364  "GaudiTest.error_diff",
1365  preproc = preproc)(stderr, result)
1366  causes += newcauses
1367  if newcauses: # Write a new reference file for stderr
1368  newref = open(reference + ".new","w")
1369  # sanitize newlines
1370  for l in stderr.splitlines():
1371  newref.write(l.rstrip() + '\n')
1372  del newref # flush and close
1373  else:
1374  causes += BasicOutputValidator(self.stderr,
1375  "standard error",
1376  "ExecTest.expected_stderr")(stderr, result)
1377 
1378  return causes
1379 
1380  def ValidateOutput(self, stdout, stderr, result):
1381  causes = []
1382  # if the test definition contains a custom validator, use it
1383  if self.validator.strip() != "":
1384  class CallWrapper(object):
1385  """
1386  Small wrapper class to dynamically bind some default arguments
1387  to a callable.
1388  """
1389  def __init__(self, callable, extra_args = {}):
1390  self.callable = callable
1391  self.extra_args = extra_args
1392  # get the list of names of positional arguments
1393  from inspect import getargspec
1394  self.args_order = getargspec(callable)[0]
1395  # Remove "self" from the list of positional arguments
1396  # since it is added automatically
1397  if self.args_order[0] == "self":
1398  del self.args_order[0]
1399  def __call__(self, *args, **kwargs):
1400  # Check which positional arguments are used
1401  positional = self.args_order[:len(args)]
1402 
1403  kwargs = dict(kwargs) # copy the arguments dictionary
1404  for a in self.extra_args:
1405  # use "extra_args" for the arguments not specified as
1406  # positional or keyword
1407  if a not in positional and a not in kwargs:
1408  kwargs[a] = self.extra_args[a]
1409  return apply(self.callable, args, kwargs)
1410  # local names to be exposed in the script
1411  exported_symbols = {"self":self,
1412  "stdout":stdout,
1413  "stderr":stderr,
1414  "result":result,
1415  "causes":causes,
1416  "findReferenceBlock":
1417  CallWrapper(findReferenceBlock, {"stdout":stdout,
1418  "result":result,
1419  "causes":causes}),
1420  "validateWithReference":
1421  CallWrapper(self.ValidateWithReference, {"stdout":stdout,
1422  "stderr":stderr,
1423  "result":result,
1424  "causes":causes}),
1425  "countErrorLines":
1426  CallWrapper(countErrorLines, {"stdout":stdout,
1427  "result":result,
1428  "causes":causes}),
1429  "checkTTreesSummaries":
1430  CallWrapper(self.CheckTTreesSummaries, {"stdout":stdout,
1431  "result":result,
1432  "causes":causes}),
1433  "checkHistosSummaries":
1434  CallWrapper(self.CheckHistosSummaries, {"stdout":stdout,
1435  "result":result,
1436  "causes":causes}),
1437 
1438  }
1439  exec self.validator in globals(), exported_symbols
1440  else:
1441  self.ValidateWithReference(stdout, stderr, result, causes)
1442 
1443  return causes
1444 
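# --- Illustrative sketch (not part of the original file): a "validator" field can
# --- combine the helpers exported above, e.g. adding an extra preprocessing step
# --- before the reference comparison ('MyAlg' is a hypothetical algorithm name):
#
#   preproc = normalizeExamples + LineSkipper(['MyAlg          DEBUG'])
#   validateWithReference(preproc=preproc)
#   checkTTreesSummaries()
#   checkHistosSummaries()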
1445  def DumpEnvironment(self, result):
1446  """
1447  Add the content of the environment to the result object.
1448 
1449  Copied from the QMTest class of COOL.
1450  """
1451  vars = os.environ.keys()
1452  vars.sort()
1453  result['GaudiTest.environment'] = \
1454  result.Quote('\n'.join(["%s=%s"%(v,os.environ[v]) for v in vars]))
1455 
1456  def Run(self, context, result):
1457  """Run the test.
1458 
1459  'context' -- A 'Context' giving run-time parameters to the
1460  test.
1461 
1462  'result' -- A 'Result' object. The outcome will be
1463  'Result.PASS' when this method is called. The 'result' may be
1464  modified by this method to indicate outcomes other than
1465  'Result.PASS' or to add annotations."""
1466 
1467  # Check if the platform is supported
1468  if self.PlatformIsNotSupported(context, result):
1469  return
1470 
1471  # Prepare program name and arguments (expanding variables, and converting to absolute)
1472  if self.program:
1473  prog = rationalizepath(self.program)
1474  elif "GAUDIEXE" in os.environ:
1475  prog = os.environ["GAUDIEXE"]
1476  else:
1477  prog = "Gaudi.exe"
1478  self.program = prog
1479 
1480  dummy, prog_ext = os.path.splitext(prog)
1481  if prog_ext not in [ ".exe", ".py", ".bat" ] and self.isWinPlatform():
1482  prog += ".exe"
1483  prog_ext = ".exe"
1484 
1485  prog = which(prog) or prog
1486 
1487  # Convert paths to absolute paths in arguments and reference files
1488  args = map(rationalizepath, self.args)
1489  self.reference = rationalizepath(self.reference)
1490  self.error_reference = rationalizepath(self.error_reference)
1491 
1492 
1493  # check if the user provided inline options
1494  tmpfile = None
1495  if self.options.strip():
1496  ext = ".opts"
1497  if re.search(r"from\s+Gaudi.Configuration\s+import\s+\*|from\s+Configurables\s+import", self.options):
1498  ext = ".py"
1499  tmpfile = TempFile(ext)
1500  tmpfile.writelines("\n".join(self.options.splitlines()))
1501  tmpfile.flush()
1502  args.append(tmpfile.name)
1503  result["GaudiTest.options"] = result.Quote(self.options)
1504 
1505  # if the program is a python file, execute it through python
1506  if prog_ext == ".py":
1507  args.insert(0,prog)
1508  if self.isWinPlatform():
1509  prog = which("python.exe") or "python.exe"
1510  else:
1511  prog = which("python") or "python"
1512 
1513  # Change to the working directory if specified or to the default temporary
1514  origdir = os.getcwd()
1515  if self.workdir:
1516  os.chdir(str(os.path.normpath(os.path.expandvars(self.workdir))))
1517  elif self.use_temp_dir == "true":
1518  if "QMTEST_TMPDIR" in os.environ:
1519  qmtest_tmpdir = os.environ["QMTEST_TMPDIR"]
1520  if not os.path.exists(qmtest_tmpdir):
1521  os.makedirs(qmtest_tmpdir)
1522  os.chdir(qmtest_tmpdir)
1523  elif "qmtest.tmpdir" in context:
1524  os.chdir(context["qmtest.tmpdir"])
1525 
1526  if "QMTEST_IGNORE_TIMEOUT" not in os.environ:
1527  self.timeout = max(self.timeout,600)
1528  else:
1529  self.timeout = -1
1530 
1531  try:
1532  # Generate eclipse.org debug launcher for the test
1533  self._CreateEclipseLaunch(prog, args, destdir = os.path.join(origdir, '.eclipse'))
1534  # Run the test
1535  self.RunProgram(prog,
1536  [ prog ] + args,
1537  context, result)
1538  # Record the content of the environment for failing tests
1539  if result.GetOutcome() not in [ result.PASS ]:
1540  self.DumpEnvironment(result)
1541  finally:
1542  # revert to the original directory
1543  os.chdir(origdir)
1544 
1545  def RunProgram(self, program, arguments, context, result):
1546  """Run the 'program'.
1547 
1548  'program' -- The path to the program to run.
1549 
1550  'arguments' -- A list of the arguments to the program. This
1551  list must contain a first argument corresponding to 'argv[0]'.
1552 
1553  'context' -- A 'Context' giving run-time parameters to the
1554  test.
1555 
1556  'result' -- A 'Result' object. The outcome will be
1557  'Result.PASS' when this method is called. The 'result' may be
1558  modified by this method to indicate outcomes other than
1559  'Result.PASS' or to add annotations.
1560 
1561  @attention: This method has been copied from command.ExecTestBase
1562  (QMTest 2.3.0) and modified to keep stdout and stderr
1563  for tests that have been terminated by a signal.
1564  (Fundamental for debugging in the Application Area)
1565  """
1566 
1567  # Construct the environment.
1568  environment = self.MakeEnvironment(context)
1569  # FIXME: without this, we get some spurious '\x1b[?1034' in the std out on SLC6
1570  if "slc6" in environment.get('CMTCONFIG', ''):
1571  environment['TERM'] = 'dumb'
1572  # Create the executable.
1573  if self.timeout >= 0:
1574  timeout = self.timeout
1575  else:
1576  # If no timeout was specified, we still run this process in a
1577  # separate process group and kill the entire process group
1578  # when the child is done executing. That means that
1579  # orphaned child processes created by the test will be
1580  # cleaned up.
1581  timeout = -2
1582  e = GaudiFilterExecutable(self.stdin, timeout)
1583  # Run it.
1584  exit_status = e.Run(arguments, environment, path = program)
1585  # Get the stack trace from the temporary file (if present)
1586  if e.stack_trace_file and os.path.exists(e.stack_trace_file):
1587  stack_trace = open(e.stack_trace_file).read()
1588  os.remove(e.stack_trace_file)
1589  else:
1590  stack_trace = None
1591  if stack_trace:
1592  result["ExecTest.stack_trace"] = result.Quote(stack_trace)
1593 
1594  # If the process terminated normally, check the outputs.
1595  if (sys.platform == "win32" or os.WIFEXITED(exit_status)
1596  or self.signal == os.WTERMSIG(exit_status)):
1597  # There are no causes of failure yet.
1598  causes = []
1599  # The target program terminated normally. Extract the
1600  # exit code, if this test checks it.
1601  if self.exit_code is None:
1602  exit_code = None
1603  elif sys.platform == "win32":
1604  exit_code = exit_status
1605  else:
1606  exit_code = os.WEXITSTATUS(exit_status)
1607  # Get the output generated by the program.
1608  stdout = e.stdout
1609  stderr = e.stderr
1610  # Record the results.
1611  result["ExecTest.exit_code"] = str(exit_code)
1612  result["ExecTest.stdout"] = result.Quote(stdout)
1613  result["ExecTest.stderr"] = result.Quote(stderr)
1614  # Check to see if the exit code matches.
1615  if exit_code != self.exit_code:
1616  causes.append("exit_code")
1617  result["ExecTest.expected_exit_code"] \
1618  = str(self.exit_code)
1619  # Validate the output.
1620  causes += self.ValidateOutput(stdout, stderr, result)
1621  # If anything went wrong, the test failed.
1622  if causes:
1623  result.Fail("Unexpected %s." % string.join(causes, ", "))
1624  elif os.WIFSIGNALED(exit_status):
1625  # The target program terminated with a signal. Construe
1626  # that as a test failure.
1627  signal_number = str(os.WTERMSIG(exit_status))
1628  if not stack_trace:
1629  result.Fail("Program terminated by signal.")
1630  else:
1631  # The presence of stack_trace means that we stopped the job because
1632  # of a time-out.
1633  result.Fail("Exceeded time limit (%ds), terminated." % timeout)
1634  result["ExecTest.signal_number"] = signal_number
1635  result["ExecTest.stdout"] = result.Quote(e.stdout)
1636  result["ExecTest.stderr"] = result.Quote(e.stderr)
1637  if self.signal:
1638  result["ExecTest.expected_signal_number"] = str(self.signal)
1639  elif os.WIFSTOPPED(exit_status):
1640  # The target program was stopped. Construe that as a
1641  # test failure.
1642  signal_number = str(os.WSTOPSIG(exit_status))
1643  if not stack_trace:
1644  result.Fail("Program stopped by signal.")
1645  else:
1646  # The presence of stack_trace means that we stopped the job because
1647  # of a time-out.
1648  result.Fail("Exceeded time limit (%ds), stopped." % timeout)
1649  result["ExecTest.signal_number"] = signal_number
1650  result["ExecTest.stdout"] = result.Quote(e.stdout)
1651  result["ExecTest.stderr"] = result.Quote(e.stderr)
1652  else:
1653  # The target program terminated abnormally in some other
1654  # manner. (This shouldn't normally happen...)
1655  result.Fail("Program did not terminate normally.")
1656 
1657  # Marco Cl.: This is a special trick to fix a "problem" with the output
1658  # of gaudi jobs when they use colors
1659  esc = '\x1b'
1660  repr_esc = '\\x1b'
1661  result["ExecTest.stdout"] = result["ExecTest.stdout"].replace(esc,repr_esc)
1662  # TODO: (MCl) improve the hack for colors in standard output
1663  # maybe by converting them to HTML tags
1664 
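# --- Example (not part of GaudiTest.py): decoding a raw POSIX wait status ---
# RunProgram above branches on os.WIFEXITED / os.WIFSIGNALED / os.WIFSTOPPED to
# classify how the child terminated. A minimal standalone sketch of that
# decoding (POSIX only; on win32 the status is already the plain exit code),
# using a hypothetical helper name:
import os

def describe_status(exit_status):
    """Return a short description of a raw wait status."""
    if os.WIFEXITED(exit_status):
        return "exited with code %d" % os.WEXITSTATUS(exit_status)
    if os.WIFSIGNALED(exit_status):
        return "terminated by signal %d" % os.WTERMSIG(exit_status)
    if os.WIFSTOPPED(exit_status):
        return "stopped by signal %d" % os.WSTOPSIG(exit_status)
    return "did not terminate normally"

assert describe_status(0) == "exited with code 0"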
1665  def _CreateEclipseLaunch(self, prog, args, destdir = None):
1666  if 'NO_ECLIPSE_LAUNCHERS' in os.environ:
1667  # do not generate eclipse launchers if the user asks so
1668  return
1669  # Find the project name used in Eclipse.
1670  # The name is in a file called ".project" in one of the parent directories
1671  projbasedir = os.path.normpath(destdir)
1672  while not os.path.exists(os.path.join(projbasedir, ".project")):
1673  oldprojdir = projbasedir
1674  projbasedir = os.path.normpath(os.path.join(projbasedir, os.pardir))
1675  # FIXME: the root level is invariant when trying to go up one level,
1676  # but it must be checked on Windows
1677  if oldprojdir == projbasedir:
1678  # If we cannot find a .project file, there is no point in creating a .launch file
1679  return
1680  # Ensure that we have a place to write to.
1681  if not os.path.exists(destdir):
1682  os.makedirs(destdir)
1683  # Use ElementTree to parse the XML file
1684  from xml.etree import ElementTree as ET
1685  t = ET.parse(os.path.join(projbasedir, ".project"))
1686  projectName = t.find("name").text
1687 
1688  # prepare the name/path of the generated file
1689  destfile = "%s.launch" % self._Runnable__id
1690  if destdir:
1691  destfile = os.path.join(destdir, destfile)
1692 
1693  if self.options.strip():
1694  # this means we have some custom options in the qmt file, so we have
1695  # to copy them from the temporary file (the last argument) into a
1696  # file stored next to the launcher
1697  tempfile = args.pop()
1698  optsfile = destfile + os.path.splitext(tempfile)[1]
1699  shutil.copyfile(tempfile, optsfile)
1700  args.append(optsfile)
1701 
1702  # prepare the data to insert in the XML file
1703  from xml.sax.saxutils import quoteattr # useful to quote XML special chars
1704  data = {}
1705  # Note: the "quoteattr(k)" is not needed because special chars cannot be part of a variable name,
1706  # but it does no harm.
1707  data["environment"] = "\n".join(['<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
1708  for k, v in os.environ.iteritems()
1709  if k not in ('MAKEOVERRIDES', 'MAKEFLAGS', 'MAKELEVEL')])
1710 
1711  data["exec"] = which(prog) or prog
1712  if os.path.basename(data["exec"]).lower().startswith("python"):
1713  data["stopAtMain"] = "false" # do not stop at main when debugging Python scripts
1714  else:
1715  data["stopAtMain"] = "true"
1716 
1717  data["args"] = "&#10;".join(map(rationalizepath, args))
1718  if self.isWinPlatform():
1719  data["args"] = "&#10;".join(["/debugexe"] + map(rationalizepath, [data["exec"]] + args))
1720  data["exec"] = which("vcexpress.exe")
1721 
1722  if not self.use_temp_dir:
1723  data["workdir"] = os.getcwd()
1724  else:
1725  # If the test is using a temporary directory, it is better to run it
1726  # in the same directory as the .launch file when debugging in Eclipse
1727  data["workdir"] = destdir
1728 
1729  data["project"] = projectName.strip()
1730 
1731  # Template for the XML file, based on eclipse 3.4
1732  xml_template = u"""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
1733 <launchConfiguration type="org.eclipse.cdt.launch.applicationLaunchType">
1734 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB" value="true"/>
1735 <listAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB_LIST"/>
1736 <stringAttribute key="org.eclipse.cdt.debug.mi.core.DEBUG_NAME" value="gdb"/>
1737 <stringAttribute key="org.eclipse.cdt.debug.mi.core.GDB_INIT" value=".gdbinit"/>
1738 <listAttribute key="org.eclipse.cdt.debug.mi.core.SOLIB_PATH"/>
1739 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.STOP_ON_SOLIB_EVENTS" value="false"/>
1740 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.breakpointsFullPath" value="false"/>
1741 <stringAttribute key="org.eclipse.cdt.debug.mi.core.commandFactory" value="org.eclipse.cdt.debug.mi.core.standardCommandFactory"/>
1742 <stringAttribute key="org.eclipse.cdt.debug.mi.core.protocol" value="mi"/>
1743 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.verboseMode" value="false"/>
1744 <intAttribute key="org.eclipse.cdt.launch.ATTR_BUILD_BEFORE_LAUNCH_ATTR" value="0"/>
1745 <stringAttribute key="org.eclipse.cdt.launch.COREFILE_PATH" value=""/>
1746 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_ID" value="org.eclipse.cdt.debug.mi.core.CDebuggerNew"/>
1747 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_REGISTER_GROUPS" value=""/>
1748 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_START_MODE" value="run"/>
1749 <booleanAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN" value="%(stopAtMain)s"/>
1750 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN_SYMBOL" value="main"/>
1751 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_REGISTER_BOOKKEEPING" value="false"/>
1752 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_VARIABLE_BOOKKEEPING" value="false"/>
1753 <stringAttribute key="org.eclipse.cdt.launch.FORMAT" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&lt;contentList/&gt;"/>
1754 <stringAttribute key="org.eclipse.cdt.launch.GLOBAL_VARIABLES" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;globalVariableList/&gt;&#10;"/>
1755 <stringAttribute key="org.eclipse.cdt.launch.MEMORY_BLOCKS" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;memoryBlockExpressionList/&gt;&#10;"/>
1756 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_ARGUMENTS" value="%(args)s"/>
1757 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_NAME" value="%(exec)s"/>
1758 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_ATTR" value="%(project)s"/>
1759 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_BUILD_CONFIG_ID_ATTR" value=""/>
1760 <stringAttribute key="org.eclipse.cdt.launch.WORKING_DIRECTORY" value="%(workdir)s"/>
1761 <booleanAttribute key="org.eclipse.cdt.launch.ui.ApplicationCDebuggerTab.DEFAULTS_SET" value="true"/>
1762 <booleanAttribute key="org.eclipse.cdt.launch.use_terminal" value="true"/>
1763 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
1764 <listEntry value="/%(project)s"/>
1765 </listAttribute>
1766 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
1767 <listEntry value="4"/>
1768 </listAttribute>
1769 <booleanAttribute key="org.eclipse.debug.core.appendEnvironmentVariables" value="false"/>
1770 <mapAttribute key="org.eclipse.debug.core.environmentVariables">
1771 %(environment)s
1772 </mapAttribute>
1773 <mapAttribute key="org.eclipse.debug.core.preferred_launchers">
1774 <mapEntry key="[debug]" value="org.eclipse.cdt.cdi.launch.localCLaunch"/>
1775 </mapAttribute>
1776 <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
1777 <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
1778 </listAttribute>
1779 </launchConfiguration>
1780 """
1781  try:
1782  # ensure the correct encoding of data values
1783  for k in data:
1784  data[k] = codecs.decode(data[k], 'utf-8')
1785  xml = xml_template % data
1786 
1787  # Write the output file
1788  codecs.open(destfile, "w", encoding='utf-8').write(xml)
1789  except:
1790  print 'WARNING: problem generating Eclipse launcher'
1791 
1792 
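# --- Example (not part of GaudiTest.py): environment entries for the launcher ---
# _CreateEclipseLaunch builds the <mapEntry/> list and %-substitutes the 'data'
# dictionary into the XML template, quoting attribute values with quoteattr.
# A small standalone sketch of the same idea, with hypothetical variable names:
from xml.sax.saxutils import quoteattr

env = {"MY_PATH": "/some/dir", "MY_OPT": 'a "quoted" <value>'}
entries = "\n".join('<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
                    for k, v in sorted(env.items()))
template = ('<mapAttribute key="org.eclipse.debug.core.environmentVariables">\n'
            '%(environment)s\n</mapAttribute>')
print(template % {"environment": entries})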
1793 try:
1794  import json
1795 except ImportError:
1796  # Use simplejson for LCG
1797  import simplejson as json
1798 
1799 class HTMLResultStream(ResultStream):
1800  """An 'HTMLResultStream' writes its output to a set of HTML files.
1801 
1802  The argument 'dir' is used to select the destination directory for the HTML
1803  report.
1804  The destination directory may already contain the report from a previous run
1805  (for example of a different package), in which case it will be extended to
1806  include the new data.
1807  """
1808  arguments = [
1809  qm.fields.TextField(
1810  name = "dir",
1811  title = "Destination Directory",
1812  description = """The name of the directory.
1813 
1814  All results will be written to the directory indicated.""",
1815  verbatim = "true",
1816  default_value = ""),
1817  ]
1818 
1819  def __init__(self, arguments = None, **args):
1820  """Prepare the destination directory.
1821 
1822  Creates the destination directory and stores in it some preliminary
1823  annotations and the static files found in the template directory
1824  'html_report'.
1825  """
1826  ResultStream.__init__(self, arguments, **args)
1827  self._summary = []
1828  self._summaryFile = os.path.join(self.dir, "summary.json")
1829  self._annotationsFile = os.path.join(self.dir, "annotations.json")
1830  # Prepare the destination directory using the template
1831  templateDir = os.path.join(os.path.dirname(__file__), "html_report")
1832  if not os.path.isdir(self.dir):
1833  os.makedirs(self.dir)
1834  # Copy the files from the template directory, excluding subdirectories
1835  for f in os.listdir(templateDir):
1836  src = os.path.join(templateDir, f)
1837  dst = os.path.join(self.dir, f)
1838  if not os.path.isdir(src) and not os.path.exists(dst):
1839  shutil.copy(src, dst)
1840  # Add some non-QMTest attributes
1841  if "CMTCONFIG" in os.environ:
1842  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
1843  import socket
1844  self.WriteAnnotation("hostname", socket.gethostname())
1845 
1846  def _updateSummary(self):
1847  """Helper function to extend the global summary file in the destination
1848  directory.
1849  """
1850  if os.path.exists(self._summaryFile):
1851  oldSummary = json.load(open(self._summaryFile))
1852  else:
1853  oldSummary = []
1854  ids = set([ i["id"] for i in self._summary ])
1855  newSummary = [ i for i in oldSummary if i["id"] not in ids ]
1856  newSummary.extend(self._summary)
1857  json.dump(newSummary, open(self._summaryFile, "w"),
1858  sort_keys = True)
1859 
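# --- Example (not part of GaudiTest.py): the summary merge rule used above ---
# _updateSummary keeps every entry of the old summary whose id is not being
# rewritten and then appends the new entries. The same logic in isolation,
# with hypothetical test ids:
def merge_summaries(old, new):
    ids = set(i["id"] for i in new)
    merged = [i for i in old if i["id"] not in ids]
    merged.extend(new)
    return merged

old = [{"id": "Pkg.test_a", "outcome": "PASS"},
       {"id": "Pkg.test_b", "outcome": "FAIL"}]
new = [{"id": "Pkg.test_b", "outcome": "PASS"}]
assert [i["outcome"] for i in merge_summaries(old, new)] == ["PASS", "PASS"]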
1860  def WriteAnnotation(self, key, value):
1861  """Writes the annotation to the annotation file.
1862  If the key is already present with a different value, the value becomes
1863  a list and the new value is appended to it, except for start_time and
1864  end_time.
1865  """
1866  # Initialize the annotation dict from the file (if present)
1867  if os.path.exists(self._annotationsFile):
1868  annotations = json.load(open(self._annotationsFile))
1869  else:
1870  annotations = {}
1871  # hack because we do not have proper JSON support
1872  key, value = map(str, [key, value])
1873  if key == "qmtest.run.start_time":
1874  # Special handling of the start time:
1875  # if we are updating a result, we have to keep the original start
1876  # time, but remove the original end time to mark the report to be
1877  # in progress.
1878  if key not in annotations:
1879  annotations[key] = value
1880  if "qmtest.run.end_time" in annotations:
1881  del annotations["qmtest.run.end_time"]
1882  else:
1883  # All other annotations are added to a list
1884  if key in annotations:
1885  old = annotations[key]
1886  if type(old) is list:
1887  if value not in old:
1888  annotations[key].append(value)
1889  elif value != old:
1890  annotations[key] = [old, value]
1891  else:
1892  annotations[key] = value
1893  # Write the new annotations file
1894  json.dump(annotations, open(self._annotationsFile, "w"),
1895  sort_keys = True)
1896 
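# --- Example (not part of GaudiTest.py): annotation accumulation rule ---
# For keys other than qmtest.run.start_time, WriteAnnotation above turns
# repeated distinct values into a list. A sketch of that rule by itself,
# with hypothetical values:
def add_annotation(annotations, key, value):
    if key in annotations:
        old = annotations[key]
        if isinstance(old, list):
            if value not in old:
                old.append(value)
        elif value != old:
            annotations[key] = [old, value]
    else:
        annotations[key] = value

ann = {}
add_annotation(ann, "cmt.cmtconfig", "x86_64-slc6-gcc48-opt")
add_annotation(ann, "cmt.cmtconfig", "x86_64-slc6-gcc48-dbg")
assert ann["cmt.cmtconfig"] == ["x86_64-slc6-gcc48-opt", "x86_64-slc6-gcc48-dbg"]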
1897  def WriteResult(self, result):
1898  """Prepare the test result directory in the destination directory, storing
1899  the result fields into it.
1900  A summary of the test result is stored both in a file in the test directory
1901  and in the global summary file.
1902  """
1903  summary = {}
1904  summary["id"] = result.GetId()
1905  summary["outcome"] = result.GetOutcome()
1906  summary["cause"] = result.GetCause()
1907  summary["fields"] = result.keys()
1908  summary["fields"].sort()
1909 
1910  # Since we lack proper JSON support, convert everything to plain strings
1911  for f in ["id", "outcome", "cause"]:
1912  summary[f] = str(summary[f])
1913  summary["fields"] = map(str, summary["fields"])
1914 
1915  self._summary.append(summary)
1916 
1917  # format:
1918  # testname/summary.json
1919  # testname/field1
1920  # testname/field2
1921  testOutDir = os.path.join(self.dir, summary["id"])
1922  if not os.path.isdir(testOutDir):
1923  os.makedirs(testOutDir)
1924  json.dump(summary, open(os.path.join(testOutDir, "summary.json"), "w"),
1925  sort_keys = True)
1926  for f in summary["fields"]:
1927  open(os.path.join(testOutDir, f), "w").write(result[f])
1928 
1929  self._updateSummary()
1930 
1931  def Summarize(self):
1932  # Not implemented.
1933  pass
1934 
1935 
1936 
1937 
1938 class XMLResultStream(ResultStream):
1939  """An 'XMLResultStream' writes its output to a Ctest XML file.
1940 
1941  The argument 'dir' is used to select the destination directory for the XML
1942  report.
1943  The destination directory may already contain the report from a previous run
1944  (for example of a different package), in which case it will be updated
1945  with the new data.
1946  """
1947  arguments = [
1948  qm.fields.TextField(
1949  name = "dir",
1950  title = "Destination Directory",
1951  description = """The name of the directory.
1952 
1953  All results will be written to the directory indicated.""",
1954  verbatim = "true",
1955  default_value = ""),
1956  qm.fields.TextField(
1957  name = "prefix",
1958  title = "Output File Prefix",
1959  description = """The output file name will be the specified prefix
1960  followed by 'Test.xml' (CTest convention).""",
1961  verbatim = "true",
1962  default_value = ""),
1963  ]
1964 
1965  def __init__(self, arguments = None, **args):
1966  """Prepare the destination directory.
1967 
1968  Creates the destination directory and stores in it some preliminary
1969  annotations.
1970  """
1971  ResultStream.__init__(self, arguments, **args)
1972 
1973  self._xmlFile = os.path.join(self.dir, self.prefix + 'Test.xml')
1974 
1975  # add some global variables
1976  self._startTime = None
1977  self._endTime = None
1978  # Create and initialise the XML file if it does not exist yet
1979  if not os.path.isfile(self._xmlFile):
1980  # check that the container directory exists and create it if not
1981  if not os.path.exists(os.path.dirname(self._xmlFile)):
1982  os.makedirs(os.path.dirname(self._xmlFile))
1983 
1984  newdataset = ET.Element("newdataset")
1985  self._tree = ET.ElementTree(newdataset)
1986  self._tree.write(self._xmlFile)
1987  else :
1988  # Read the xml file
1989  self._tree = ET.parse(self._xmlFile)
1990  newdataset = self._tree.getroot()
1991 
1992  # Find the corresponding site; if it does not exist, create it
1993 
1994  #site = newdataset.find('Site[@BuildStamp="'+result["qmtest.start_time"]+'"][@OSPlatform="'+os.getenv("CMTOPT")+'"]')
1995  # This XPath-style lookup does not work here, probably because of the Python/ElementTree
1996  # version (it works in a newer interactive interpreter), so fall back to an explicit loop:
1997  for site in newdataset.getiterator() :
1998  if site.get("OSPlatform") == os.uname()[4]: # and site.get("BuildStamp") == result["qmtest.start_time"] and:
1999  # Here we could add more attributes to distinguish between two sites
2000  self._site = site
2001  break
2002  else :
2003  site = None
2004 
2005 
2006  if site is None :
2007  import socket
2008  import multiprocessing
2009  attrib = {
2010  "BuildName": os.getenv("CMTCONFIG"),
2011  "Name": os.uname()[1],
2012  "Generator": "QMTest " + qm.version,
2013  "OSName": os.uname()[0],
2014  "Hostname": socket.gethostname(),
2015  "OSRelease": os.uname()[2],
2016  "OSVersion": os.uname()[3],
2017  "OSPlatform": os.uname()[4],
2018  "Is64Bits": "unknown",
2019  "VendorString": "unknown",
2020  "VendorID": "unknown",
2021  "FamilyID": "unknown",
2022  "ModelID": "unknown",
2023  "ProcessorCacheSize": "unknown",
2024  "NumberOfLogicalCPU": str(multiprocessing.cpu_count()),
2025  "NumberOfPhysicalCPU": "0",
2026  "TotalVirtualMemory": "0",
2027  "TotalPhysicalMemory": "0",
2028  "LogicalProcessorsPerPhysical": "0",
2029  "ProcessorClockFrequency": "0",
2030  }
2031  self._site = ET.SubElement(newdataset, "site", attrib)
2032  self._Testing = ET.SubElement(self._site,"Testing")
2033 
2034  # Start time elements
2035  self._StartDateTime = ET.SubElement(self._Testing, "StartDateTime")
2036 
2037  self._StartTestTime = ET.SubElement(self._Testing, "StartTestTime")
2038 
2039 
2040  self._TestList = ET.SubElement(self._Testing, "TestList")
2041 
2042  ## End time elements
2043  self._EndDateTime = ET.SubElement(self._Testing, "EndDateTime")
2044 
2045 
2046  self._EndTestTime = ET.SubElement(self._Testing, "EndTestTime")
2047 
2048 
2049 
2050  self._ElapsedMinutes = ET.SubElement(self._Testing, "ElapsedMinutes")
2051 
2052 
2053  else : # the site already exists, so retrieve its elements
2054  self._Testing = self._site.find("Testing")
2055  self._StartDateTime = self._Testing.find("StartDateTime")
2056  self._StartTestTime = self._Testing.find("StartTestTime")
2057  self._TestList = self._Testing.find("TestList")
2058  self._EndDateTime = self._Testing.find("EndDateTime")
2059  self._EndTestTime = self._Testing.find("EndTestTime")
2060  self._ElapsedMinutes = self._Testing.find("ElapsedMinutes")
2061 
2062  """
2063  # Add some non-QMTest attributes
2064  if "CMTCONFIG" in os.environ:
2065  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
2066  import socket
2067  self.WriteAnnotation("hostname", socket.gethostname())
2068  """
2069 
2070 
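# --- Example (not part of GaudiTest.py): skeleton of the CTest report ---
# When no report exists, the constructor above builds a <newdataset> root with
# one <site> per platform and a <Testing> section holding the timing elements
# and the TestList. A reduced sketch with only a few of the attributes used
# (hypothetical defaults, POSIX only):
import os
import socket
import xml.etree.ElementTree as ET

newdataset = ET.Element("newdataset")
site = ET.SubElement(newdataset, "site", {
    "BuildName": os.getenv("CMTCONFIG", "unknown"),
    "Hostname": socket.gethostname(),
    "OSPlatform": os.uname()[4],  # POSIX only, as in the code above
})
testing = ET.SubElement(site, "Testing")
for tag in ("StartDateTime", "StartTestTime", "TestList",
            "EndDateTime", "EndTestTime", "ElapsedMinutes"):
    ET.SubElement(testing, tag)
print(ET.tostring(newdataset))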
2071  def WriteAnnotation(self, key, value):
2072  if key == "qmtest.run.start_time":
2073  if self._site.get("qmtest.run.start_time") is not None :
2074  return None
2075  self._site.set(str(key),str(value))
2076  def WriteResult(self, result):
2077  """Add the test result to the CTest XML report.
2078  The result fields are stored as NamedMeasurement/Measurement entries of
2079  the corresponding <Test> element, and the test id is added to the
2080  TestList.
2081  """
2082  summary = {}
2083  summary["id"] = result.GetId()
2084  summary["outcome"] = result.GetOutcome()
2085  summary["cause"] = result.GetCause()
2086  summary["fields"] = result.keys()
2087  summary["fields"].sort()
2088 
2089 
2090  # Since we lack proper JSON support, convert everything to plain strings
2091  for f in ["id", "outcome", "cause"]:
2092  summary[f] = str(summary[f])
2093  summary["fields"] = map(str, summary["fields"])
2094 
2095 
2096  # format
2097  # package_Test.xml
2098 
2099  if "qmtest.start_time" in summary["fields"]:
2100  haveStartDate = True
2101  else :
2102  haveStartDate = False
2103  if "qmtest.end_time" in summary["fields"]:
2104  haveEndDate = True
2105  else :
2106  haveEndDate = False
2107 
2108  # writing the start date time
2109  if haveStartDate:
2110  self._startTime = calendar.timegm(time.strptime(result["qmtest.start_time"], "%Y-%m-%dT%H:%M:%SZ"))
2111  if self._StartTestTime.text is None:
2112  self._StartDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._startTime))
2113  self._StartTestTime.text = str(self._startTime)
2114  self._site.set("BuildStamp" , result["qmtest.start_time"] )
2115 
2116  #Save the end date time in memory
2117  if haveEndDate:
2118  self._endTime = calendar.timegm(time.strptime(result["qmtest.end_time"], "%Y-%m-%dT%H:%M:%SZ"))
2119 
2120 
2121  #add the current test to the test list
2122  tl = ET.Element("Test")
2123  tl.text = summary["id"]
2124  self._TestList.insert(0,tl)
2125 
2126  #Fill the current test
2127  Test = ET.Element("Test")
2128  if summary["outcome"] == "PASS":
2129  Test.set("Status", "passed")
2130  elif summary["outcome"] == "FAIL":
2131  Test.set("Status", "failed")
2132  elif summary["outcome"] == "SKIPPED" or summary["outcome"] == "UNTESTED":
2133  Test.set("Status", "skipped")
2134  elif summary["outcome"] == "ERROR":
2135  Test.set("Status", "failed")
2136  Name = ET.SubElement(Test, "Name",)
2137  Name.text = summary["id"]
2138  Results = ET.SubElement(Test, "Results")
2139 
2140  # insert the test right after the TestList element
2141  self._Testing.insert(3,Test)
2142 
2143  if haveStartDate and haveEndDate:
2144  # Compute the test duration
2145  delta = self._endTime - self._startTime
2146  testduration = str(delta)
2147  Testduration= ET.SubElement(Results,"NamedMeasurement")
2148  Testduration.set("name","Execution Time")
2149  Testduration.set("type","numeric/float" )
2150  value = ET.SubElement(Testduration, "Value")
2151  value.text = testduration
2152 
2153  #remove the fields that we store in a different way
2154  for n in ("qmtest.end_time", "qmtest.start_time", "qmtest.cause", "ExecTest.stdout"):
2155  if n in summary["fields"]:
2156  summary["fields"].remove(n)
2157 
2158  # Here we can add some NamedMeasurement entries whose type we know
2159  #
2160  if "ExecTest.exit_code" in summary["fields"] :
2161  summary["fields"].remove("ExecTest.exit_code")
2162  ExitCode= ET.SubElement(Results,"NamedMeasurement")
2163  ExitCode.set("name","exit_code")
2164  ExitCode.set("type","numeric/integer" )
2165  value = ET.SubElement(ExitCode, "Value")
2166  value.text = convert_xml_illegal_chars(result["ExecTest.exit_code"])
2167 
2168  TestStartTime= ET.SubElement(Results,"NamedMeasurement")
2169  TestStartTime.set("name","Start_Time")
2170  TestStartTime.set("type","String" )
2171  value = ET.SubElement(TestStartTime, "Value")
2172  if haveStartDate :
2173  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._startTime)))
2174  else :
2175  value.text = ""
2176 
2177  TestEndTime= ET.SubElement(Results,"NamedMeasurement")
2178  TestEndTime.set("name","End_Time")
2179  TestEndTime.set("type","String" )
2180  value = ET.SubElement(TestEndTime, "Value")
2181  if haveEndDate :
2182  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._endTime)))
2183  else :
2184  value.text = ""
2185 
2186  if summary["cause"]:
2187  FailureCause= ET.SubElement(Results,"NamedMeasurement")
2188  FailureCause.set("name", "Cause")
2189  FailureCause.set("type", "String" )
2190  value = ET.SubElement(FailureCause, "Value")
2191  value.text = escape_xml_illegal_chars(summary["cause"])
2192 
2193  #Fill the result
2194  fields = {}
2195  for field in summary["fields"] :
2196  fields[field] = ET.SubElement(Results, "NamedMeasurement")
2197  fields[field].set("type","String")
2198  fields[field].set("name",field)
2199  value = ET.SubElement(fields[field], "Value")
2200  # strip the enclosing <pre></pre> tags, if present
2201  if "<pre>" in result[field][0:6] :
2202  value.text = convert_xml_illegal_chars(result[field][5:-6])
2203  else :
2204  value.text = convert_xml_illegal_chars(result[field])
2205 
2206 
2207  if result.has_key("ExecTest.stdout" ) : #"ExecTest.stdout" in result :
2208  Measurement = ET.SubElement(Results, "Measurement")
2209  value = ET.SubElement(Measurement, "Value")
2210  if "<pre>" in result["ExecTest.stdout"][0:6] :
2211  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"][5:-6])
2212  else :
2213  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"])
2214 
2215 
2216  # write the file
2217  self._tree.write(self._xmlFile, "utf-8") # in Python 2.7, xml_declaration=True would also write the XML header
2218 
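# --- Example (not part of GaudiTest.py): timestamp handling in WriteResult ---
# The qmtest.start_time/end_time fields are UTC strings in the format
# "%Y-%m-%dT%H:%M:%SZ"; converting them with calendar.timegm gives epoch
# seconds, and their difference is the "Execution Time" measurement
# (example timestamps below are arbitrary):
import calendar
import time

FMT = "%Y-%m-%dT%H:%M:%SZ"
start = calendar.timegm(time.strptime("2014-03-10T12:00:00Z", FMT))
end = calendar.timegm(time.strptime("2014-03-10T12:00:42Z", FMT))
assert end - start == 42  # seconds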
2219 
2220  def Summarize(self):
2221 
2222  # Set the final end date time
2223  self._EndTestTime.text = str(self._endTime)
2224  self._EndDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._endTime))
2225 
2226  # Compute the total duration
2227  if self._endTime and self._startTime:
2228  delta = self._endTime - self._startTime
2229  else:
2230  delta = 0
2231  self._ElapsedMinutes.text = str(delta/60)
2232 
2233  # Write into the file
2234  self._tree.write(self._xmlFile, "utf-8") # in Python 2.7, xml_declaration=True would also write the XML header
2235 