GaudiTest.py
1 ########################################################################
2 # File: GaudiTest.py
3 # Author: Marco Clemencic CERN/PH-LBC
4 ########################################################################
5 __author__ = 'Marco Clemencic CERN/PH-LBC'
6 ########################################################################
7 # Imports
8 ########################################################################
9 import os
10 import sys
11 import re
12 import tempfile
13 import shutil
14 import string
15 import difflib
16 import time
17 import calendar
18 import codecs
19 
20 from subprocess import Popen, PIPE, STDOUT
21 
22 try:
23  from GaudiKernel import ROOT6WorkAroundEnabled
24 except ImportError:
25  def ROOT6WorkAroundEnabled(id=None):
26  # dummy implementation
27  return False
28 
29 # ensure the preferred locale
30 os.environ['LC_ALL'] = 'C'
31 
32 # Needed for the XML wrapper
33 try:
34  import xml.etree.cElementTree as ET
35 except ImportError:
36  import xml.etree.ElementTree as ET
37 
38 # replacement for timedelta.total_seconds(), which is not available in Python 2.6
39 def total_seconds_replacement(timedelta):
40  return timedelta.days*86400 + timedelta.seconds + timedelta.microseconds/1000000.0
41 
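# Example (illustrative sketch, not part of the original file): the helper above
# mirrors datetime.timedelta.total_seconds(), which Python 2.6 lacks, e.g.
#
#   from datetime import timedelta
#   total_seconds_replacement(timedelta(minutes=2, seconds=3))   # -> 123.0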
42 
43 import qm
44 from qm.test.classes.command import ExecTestBase
45 from qm.test.result_stream import ResultStream
46 
47 ### Needed by the re-implementation of TimeoutExecutable
48 import qm.executable
49 import signal
50 # The classes in this module are implemented differently depending on
51 # the operating system in use.
52 if sys.platform == "win32":
53  import msvcrt
54  import pywintypes
55  from threading import *
56  import win32api
57  import win32con
58  import win32event
59  import win32file
60  import win32pipe
61  import win32process
62 else:
63  import cPickle
64  import fcntl
65  import select
66  import qm.sigmask
67 
68 ########################################################################
69 # Utility Classes
70 ########################################################################
71 class TemporaryEnvironment:
72  """
73  Class to change the environment temporarily.
74  """
75  def __init__(self, orig = os.environ, keep_same = False):
76  """
77  Create a temporary environment on top of the one specified
78  (it can be another TemporaryEnvironment instance).
79  """
80  #print "New environment"
81  self.old_values = {}
82  self.env = orig
83  self._keep_same = keep_same
84 
85  def __setitem__(self,key,value):
86  """
87  Set an environment variable recording the previous value.
88  """
89  if key not in self.old_values :
90  if key in self.env :
91  if not self._keep_same or self.env[key] != value:
92  self.old_values[key] = self.env[key]
93  else:
94  self.old_values[key] = None
95  self.env[key] = value
96 
97  def __getitem__(self,key):
98  """
99  Get an environment variable.
100  Needed to provide the same interface as os.environ.
101  """
102  return self.env[key]
103 
104  def __delitem__(self,key):
105  """
106  Unset an environment variable.
107  Needed to provide the same interface as os.environ.
108  """
109  if key not in self.env :
110  raise KeyError(key)
111  self.old_values[key] = self.env[key]
112  del self.env[key]
113 
114  def keys(self):
115  """
116  Return the list of defined environment variables.
117  Needed to provide the same interface as os.environ.
118  """
119  return self.env.keys()
120 
121  def items(self):
122  """
123  Return the list of (name,value) pairs for the defined environment variables.
124  Needed to provide the same interface as os.environ.
125  """
126  return self.env.items()
127 
128  def __contains__(self,key):
129  """
130  Operator 'in'.
131  Needed to provide the same interface as os.environ.
132  """
133  return key in self.env
134 
135  def restore(self):
136  """
137  Revert all the changes done to the original environment.
138  """
139  for key,value in self.old_values.items():
140  if value is None:
141  del self.env[key]
142  else:
143  self.env[key] = value
144  self.old_values = {}
145 
146  def __del__(self):
147  """
148  Revert the changes on destruction.
149  """
150  #print "Restoring the environment"
151  self.restore()
152 
153  def gen_script(self,shell_type):
154  """
155  Generate a shell script to reproduce the changes in the environment.
156  """
157  shells = [ 'csh', 'sh', 'bat' ]
158  if shell_type not in shells:
159  raise RuntimeError("Shell type '%s' unknown. Available: %s"%(shell_type,shells))
160  out = ""
161  for key,value in self.old_values.items():
162  if key not in self.env:
163  # unset variable
164  if shell_type == 'csh':
165  out += 'unsetenv %s\n'%key
166  elif shell_type == 'sh':
167  out += 'unset %s\n'%key
168  elif shell_type == 'bat':
169  out += 'set %s=\n'%key
170  else:
171  # set variable
172  if shell_type == 'csh':
173  out += 'setenv %s "%s"\n'%(key,self.env[key])
174  elif shell_type == 'sh':
175  out += 'export %s="%s"\n'%(key,self.env[key])
176  elif shell_type == 'bat':
177  out += 'set %s=%s\n'%(key,self.env[key])
178  return out
179 
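# Example (illustrative sketch, not part of the original file; the variable name
# MY_TEST_VAR is hypothetical): typical use of TemporaryEnvironment.
#
#   env = TemporaryEnvironment()
#   env['MY_TEST_VAR'] = '1'           # previous value (if any) is recorded
#   print env.gen_script('sh'),        # -> export MY_TEST_VAR="1"
#   env.restore()                      # os.environ is back to its original state
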
180 class TempDir:
181  """Small class for temporary directories.
182  When instantiated, it creates a temporary directory and the instance
183  behaves as the string containing the directory name.
184  When the instance goes out of scope, it removes all the content of
185  the temporary directory (automatic clean-up).
186  """
187  def __init__(self, keep = False, chdir = False):
188  self.name = tempfile.mkdtemp()
189  self._keep = keep
190  self._origdir = None
191  if chdir:
192  self._origdir = os.getcwd()
193  os.chdir(self.name)
194 
195  def __str__(self):
196  return self.name
197 
198  def __del__(self):
199  if self._origdir:
200  os.chdir(self._origdir)
201  if self.name and not self._keep:
202  shutil.rmtree(self.name)
203 
204  def __getattr__(self,attr):
205  return getattr(self.name,attr)
206 
207 class TempFile:
208  """Small class for temporary files.
209  When instantiated, it creates a temporary file and the instance
210  behaves as the string containing the file name.
211  When the instance goes out of scope, it closes and removes the
212  temporary file (automatic clean-up).
213  """
214  def __init__(self, suffix='', prefix='tmp', dir=None, text=False, keep = False):
215  self.file = None
216  self.name = None
217  self._keep = keep
218 
219  self._fd, self.name = tempfile.mkstemp(suffix,prefix,dir,text)
220  self.file = os.fdopen(self._fd,"r+")
221 
222  def __str__(self):
223  return self.name
224 
225  def __del__(self):
226  if self.file:
227  self.file.close()
228  if self.name and not self._keep:
229  os.remove(self.name)
230 
231  def __getattr__(self,attr):
232  return getattr(self.file,attr)
233 
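# Example (illustrative sketch, not part of the original file): both helpers
# clean up automatically when the instance goes out of scope.
#
#   tmpd = TempDir()                    # scratch directory, removed on deletion
#   tmpf = TempFile(suffix='.opts')     # scratch file, already open for read/write
#   tmpf.write('// some options')       # file methods are forwarded via __getattr__
#   tmpf.flush()
#   del tmpf, tmpd                      # file and directory are deleted here
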
234 class CMT:
235  """Small wrapper to call CMT.
236  """
237  def __init__(self,path=None):
238  if path is None:
239  path = os.getcwd()
240  self.path = path
241 
242  def _run_cmt(self,command,args):
243  # prepare command line
244  if type(args) is str:
245  args = [args]
246  cmd = "cmt %s"%command
247  for arg in args:
248  cmd += ' "%s"'%arg
249 
250  # go to the execution directory
251  olddir = os.getcwd()
252  os.chdir(self.path)
253  # run cmt
254  result = os.popen4(cmd)[1].read()
255  # return to the old directory
256  os.chdir(olddir)
257  return result
258 
259  def __getattr__(self,attr):
260  return lambda args=[]: self._run_cmt(attr, args)
261 
262  def runtime_env(self,env = None):
263  """Returns a dictionary containing the runtime environment produced by CMT.
264  If a dictionary is passed a modified instance of it is returned.
265  """
266  if env is None:
267  env = {}
268  for l in self.setup("-csh").splitlines():
269  l = l.strip()
270  if l.startswith("setenv"):
271  dummy,name,value = l.split(None,3)
272  env[name] = value.strip('"')
273  elif l.startswith("unsetenv"):
274  dummy,name = l.split(None,2)
275  if name in env:
276  del env[name]
277  return env
278  def show_macro(self,k):
279  r = self.show(["macro",k])
280  if r.find("CMT> Error: symbol not found") >= 0:
281  return None
282  else:
283  return self.show(["macro_value",k]).strip()
284 
285 
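# Example (illustrative sketch, not part of the original file; the package path
# is hypothetical): attribute access on CMT is turned into 'cmt <command>' calls.
#
#   cmt = CMT('/path/to/SomePackage/cmt')
#   print cmt.show(['uses'])                    # runs: cmt show "uses"
#   env = cmt.runtime_env(dict(os.environ))     # environment after 'cmt setup -csh'
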
286 ## Locates an executable in the executables path ($PATH) and returns the full
287 # path to it.
288 # If the executable cannot be found, None is returned
289 def which(executable):
290  """
291  Locates an executable in the executables path ($PATH) and returns the full
292  path to it. An application is looked for with or without the '.exe' suffix.
293  If the executable cannot be found, None is returned
294  """
295  if os.path.isabs(executable):
296  if not os.path.exists(executable):
297  if executable.endswith('.exe'):
298  if os.path.exists(executable[:-4]):
299  return executable[:-4]
300  return executable
301  for d in os.environ.get("PATH").split(os.pathsep):
302  fullpath = os.path.join(d, executable)
303  if os.path.exists(fullpath):
304  return fullpath
305  if executable.endswith('.exe'):
306  return which(executable[:-4])
307  return None
308 
309 def rationalizepath(p):
310  np = os.path.normpath(os.path.expandvars(p))
311  if os.path.exists(np):
312  p = os.path.realpath(np)
313  return p
314 
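# Examples (illustrative sketch, not part of the original file; actual results
# depend on the local filesystem and environment):
#
#   which('ls')                      # -> '/bin/ls' (full path, or None if not found)
#   rationalizepath('$HOME/./work')  # -> expanded, normalized, symlink-resolved path
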
315 # XML Escaping character
316 import re
317 
318 # xml 1.0 valid characters:
319 # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
320 # so to invert that, not in Char ::
321 # x0 - x8 | xB | xC | xE - x1F
322 # (most control characters, though TAB, CR, LF allowed)
323 # | #xD800 - #xDFFF
324 # (unicode surrogate characters)
325 # | #xFFFE | #xFFFF |
326 # (unicode end-of-plane non-characters)
327 # >= 110000
328 # that would be beyond unicode!!!
329 _illegal_xml_chars_RE = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
330 
331 def hexreplace( match ):
332  "Return the hex representation of the matched characters."
333  return "".join(map(hexConvert,match.group()))
334 
335 def hexConvert(char):
336  return hex(ord(char))
337 def convert_xml_illegal_chars(val):
338  return _illegal_xml_chars_RE.sub(hexreplace, val)
339 
340 def escape_xml_illegal_chars(val, replacement='?'):
341  """Filter out characters that are illegal in XML.
342  Looks for any character in val that is not allowed in XML
343  and replaces it with replacement ('?' by default).
344 
345  """
346  return _illegal_xml_chars_RE.sub(replacement, val)
347 
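# Example (illustrative sketch, not part of the original file):
#
#   escape_xml_illegal_chars(u'ok\x00bad')   # -> u'ok?bad' (NUL replaced by '?')
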
348 ########################################################################
349 # Output Validation Classes
350 ########################################################################
351 class BasicOutputValidator:
352  """Basic implementation of an output validator for Gaudi tests.
353  This implementation is based on the standard (LCG) validation functions
354  used in QMTest.
355  """
356  def __init__(self,ref,cause,result_key):
357  self.reference = ref
358  self.cause = cause
359  self.result_key = result_key
360 
361  def __call__(self, out, result):
362  """Validate the output of the program.
363 
364  'out' -- A string containing the data written to the standard output
365  stream by the program under test.
366 
367  'result' -- A 'Result' object. It may be used to annotate
368  the outcome according to the content of the output.
369 
370  The output is compared with the reference text given to the
371  constructor, ignoring differences in line endings.
372 
373  returns -- A list of strings giving causes of failure."""
374 
375  causes = []
376  # Check to see if the output matches.
377  if not self.__CompareText(out, self.reference):
378  causes.append(self.cause)
379  result[self.result_key] = result.Quote(self.reference)
380 
381  return causes
382 
383  def __CompareText(self, s1, s2):
384  """Compare 's1' and 's2', ignoring line endings.
385 
386  's1' -- A string.
387 
388  's2' -- A string.
389 
390  returns -- True if 's1' and 's2' are the same, ignoring
391  differences in line endings."""
392 
393  # The "splitlines" method works independently of the line ending
394  # convention in use.
395  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
396  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
397  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
398  keep_line = lambda l: not to_ignore.match(l)
399  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
400  else:
401  return s1.splitlines() == s2.splitlines()
402 
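# Example (illustrative sketch, not part of the original file; 'expected_text',
# 'out' and 'result' stand for the reference text, the captured stdout and the
# QMTest Result object of a hypothetical test):
#
#   validate = BasicOutputValidator(expected_text, "standard output",
#                                   "ExecTest.expected_stdout")
#   causes = validate(out, result)      # empty list when the output matches
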
403 class FilePreprocessor:
404  """ Base class for a callable that takes a file and returns a modified
405  version of it."""
406  def __processLine__(self, line):
407  return line
408  def __call__(self, input):
409  if hasattr(input,"__iter__"):
410  lines = input
411  mergeback = False
412  else:
413  lines = input.splitlines()
414  mergeback = True
415  output = []
416  for l in lines:
417  l = self.__processLine__(l)
418  if l: output.append(l)
419  if mergeback: output = '\n'.join(output)
420  return output
421  def __add__(self, rhs):
422  return FilePreprocessorSequence([self,rhs])
423 
424 class FilePreprocessorSequence(FilePreprocessor):
425  def __init__(self, members = []):
426  self.members = members
427  def __add__(self, rhs):
428  return FilePreprocessorSequence(self.members + [rhs])
429  def __call__(self, input):
430  output = input
431  for pp in self.members:
432  output = pp(output)
433  return output
434 
435 class LineSkipper(FilePreprocessor):
436  def __init__(self, strings = [], regexps = []):
437  import re
438  self.strings = strings
439  self.regexps = map(re.compile,regexps)
440 
441  def __processLine__(self, line):
442  for s in self.strings:
443  if line.find(s) >= 0: return None
444  for r in self.regexps:
445  if r.search(line): return None
446  return line
447 
448 class BlockSkipper(FilePreprocessor):
449  def __init__(self, start, end):
450  self.start = start
451  self.end = end
452  self._skipping = False
453 
454  def __processLine__(self, line):
455  if self.start in line:
456  self._skipping = True
457  return None
458  elif self.end in line:
459  self._skipping = False
460  elif self._skipping:
461  return None
462  return line
463 
464 class RegexpReplacer(FilePreprocessor):
465  def __init__(self, orig, repl = "", when = None):
466  if when:
467  when = re.compile(when)
468  self._operations = [ (when, re.compile(orig), repl) ]
469  def __add__(self,rhs):
470  if isinstance(rhs, RegexpReplacer):
471  res = RegexpReplacer("","",None)
472  res._operations = self._operations + rhs._operations
473  else:
474  res = FilePreprocessor.__add__(self, rhs)
475  return res
476  def __processLine__(self, line):
477  for w,o,r in self._operations:
478  if w is None or w.search(line):
479  line = o.sub(r, line)
480  return line
481 
482 # Common preprocessors
483 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
484 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9] *(CES?T)?",
485  "00:00:00 1970-01-01")
486 normalizeEOL = FilePreprocessor()
487 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
488 
489 skipEmptyLines = FilePreprocessor()
490 # FIXME: that's ugly
491 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
492 
493 ## Special preprocessor sorting the list of strings (whitespace separated)
494 # that follow a signature on a single line
495 class LineSorter(FilePreprocessor):
496  def __init__(self, signature):
497  self.signature = signature
498  self.siglen = len(signature)
499  def __processLine__(self, line):
500  pos = line.find(self.signature)
501  if pos >=0:
502  lst = line[(pos+self.siglen):].split()
503  line = line[:(pos+self.siglen)]
504  lst.sort()
505  line += " ".join(lst)
506  return line
507 
508 # Preprocessors for GaudiExamples
509 normalizeExamples = maskPointers + normalizeDate
510 for w,o,r in [
511  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
512  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
513  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
514  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
515  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
516  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
517  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
518  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
519  # Absorb a change in ServiceLocatorHelper
520  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
521  # Remove the leading 0 in Windows' exponential format
522  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
523  # Output line changed in Gaudi v24
524  (None, r'Service reference count check:', r'Looping over all active services...'),
525  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
526  normalizeExamples += RegexpReplacer(o,r,w)
527 
528 lineSkipper = LineSkipper(["//GP:",
529  "JobOptionsSvc INFO # ",
530  "JobOptionsSvc WARNING # ",
531  "Time User",
532  "Welcome to",
533  "This machine has a speed",
534  "TIME:",
535  "running on",
536  "ToolSvc.Sequenc... INFO",
537  "DataListenerSvc INFO XML written to file:",
538  "[INFO]","[WARNING]",
539  "DEBUG No writable file catalog found which contains FID:",
540  "0 local", # hack for ErrorLogExample
541  "DEBUG Service base class initialized successfully", # changed between v20 and v21
542  "DEBUG Incident timing:", # introduced with patch #3487
543  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
544  # The signal handler complains about SIGXCPU not defined on some platforms
545  'SIGXCPU',
546  ],regexps = [
547  r"^JobOptionsSvc INFO *$",
548  r"^#", # Ignore python comments
549  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
550  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
551  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
552  r"File '.*.xml' does not exist",
553  r"INFO Refer to dataset .* by its file ID:",
554  r"INFO Referring to dataset .* by its file ID:",
555  r"INFO Disconnect from dataset",
556  r"INFO Disconnected from dataset",
557  r"INFO Disconnected data IO:",
558  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
559  # I want to ignore the header of the unchecked StatusCode report
560  r"^StatusCodeSvc.*listing all unchecked return codes:",
561  r"^StatusCodeSvc\s*INFO\s*$",
562  r"Num\s*\|\s*Function\s*\|\s*Source Library",
563  r"^[-+]*\s*$",
564  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
565  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
566  # Hide unchecked StatusCodes from dictionaries
567  r"^ +[0-9]+ \|.*ROOT",
568  r"^ +[0-9]+ \|.*\|.*Dict",
569  # Hide success StatusCodeSvc message
570  r"StatusCodeSvc.*all StatusCode instances where checked",
571  # Remove ROOT TTree summary table, which changes from one version to the other
572  r"^\*.*\*$",
573  # Remove Histos Summaries
574  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
575  r"^ \|",
576  r"^ ID=",
577  ] )
578 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
579  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
580  lineSkipper += LineSkipper(regexps = [
581  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
582  ])
583 
584 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
585  normalizeEOL + LineSorter("Services to release : "))
586 
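# Example (illustrative sketch, not part of the original file): preprocessors
# compose with '+' and accept either a whole string or a list of lines.
#
#   clean = LineSkipper(['DEBUG']) + RegexpReplacer(r'0x[0-9a-fA-F]+', '0x####')
#   clean('INFO  ptr=0xdeadbeef\nDEBUG drop me')   # -> 'INFO  ptr=0x####'
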
587 class ReferenceFileValidator:
588  def __init__(self, reffile, cause, result_key, preproc = normalizeExamples):
589  self.reffile = os.path.expandvars(reffile)
590  self.cause = cause
591  self.result_key = result_key
592  self.preproc = preproc
593  def __call__(self, stdout, result):
594  causes = []
595  if os.path.isfile(self.reffile):
596  orig = open(self.reffile).xreadlines()
597  if self.preproc:
598  orig = self.preproc(orig)
599  else:
600  orig = []
601 
602  new = stdout.splitlines()
603  if self.preproc:
604  new = self.preproc(new)
605  #open(self.reffile + ".test","w").writelines(new)
606  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
607  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
608  #filterdiffs = [x.strip() for x in diffs]
609  if filterdiffs:
610  result[self.result_key] = result.Quote("\n".join(filterdiffs))
611  result[self.result_key] += result.Quote("""
612 Legend:
613  -) reference file
614  +) standard output of the test""")
615  causes.append(self.cause)
616 
617  return causes
618 
619 ########################################################################
620 # Useful validation functions
621 ########################################################################
622 def findReferenceBlock(reference, stdout, result, causes, signature_offset=0, signature=None,
623  id = None):
624  """
625  Given a block of text, try to find it in the output.
626  The block has to be identified by a signature line. By default, the first
627  line is used as signature, or the line pointed to by signature_offset. If
628  signature_offset points outside the block, a signature line can be passed as
629  the signature argument. Note: if 'signature' is None (the default), a negative
630  signature_offset is interpreted as an index in a list (e.g. -1 means the last
631  line), otherwise it is interpreted as the number of lines before the
632  first one of the block at which the signature must appear.
633  The parameter 'id' allows one to distinguish between different calls to this
634  function in the same validation code.
635  """
636  # split reference file, sanitize EOLs and remove empty lines
637  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
638  if not reflines:
639  raise RuntimeError("Empty (or null) reference")
640  # the same on standard output
641  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
642 
643  res_field = "GaudiTest.RefBlock"
644  if id:
645  res_field += "_%s" % id
646 
647  if signature is None:
648  if signature_offset < 0:
649  signature_offset = len(reflines)+signature_offset
650  signature = reflines[signature_offset]
651  # find the reference block in the output file
652  try:
653  pos = outlines.index(signature)
654  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
655  if reflines != outlines:
656  msg = "standard output"
657  # I do not want 2 messages in causes if the function is called twice
658  if not msg in causes:
659  causes.append(msg)
660  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
661  except ValueError:
662  causes.append("missing signature")
663  result[res_field + ".signature"] = result.Quote(signature)
664  if len(reflines) > 1 or signature != reflines[0]:
665  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
666 
667  return causes
668 
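# Example (illustrative sketch, not part of the original file; the expected text is
# hypothetical): typical use inside a validator, where 'stdout', 'result' and
# 'causes' are provided by the test framework.
#
#   expected = """
#   ApplicationMgr       INFO Application Manager Initialized successfully
#   """
#   findReferenceBlock(expected, stdout, result, causes)
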
669 def countErrorLines(expected = {'ERROR':0, 'FATAL':0}, **kwargs):
670  """
671  Count the number of messages with required severity (by default ERROR and FATAL)
672  and check if their numbers match the expected ones (0 by default).
673  The dictionary "expected" can be used to tune the number of errors and fatals
674  allowed, or to limit the number of expected warnings etc.
675  """
676  stdout = kwargs["stdout"]
677  result = kwargs["result"]
678  causes = kwargs["causes"]
679 
680  # prepare the dictionary to record the extracted lines
681  errors = {}
682  for sev in expected:
683  errors[sev] = []
684 
685  outlines = stdout.splitlines()
686  from math import log10
687  fmt = "%%%dd - %%s" % (int(log10(len(outlines))+1))
688 
689  linecount = 0
690  for l in outlines:
691  linecount += 1
692  words = l.split()
693  if len(words) >= 2 and words[1] in errors:
694  errors[words[1]].append(fmt%(linecount,l.rstrip()))
695 
696  for e in errors:
697  if len(errors[e]) != expected[e]:
698  causes.append('%s(%d)'%(e,len(errors[e])))
699  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
700  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
701 
702  return causes
703 
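# Example (illustrative sketch, not part of the original file): allow exactly two
# ERROR messages and no FATAL ones in a validator script.
#
#   countErrorLines({'ERROR': 2, 'FATAL': 0},
#                   stdout=stdout, result=result, causes=causes)
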
704 
705 def _parseTTreeSummary(lines, pos):
706  """
707  Parse the TTree summary table in lines, starting from pos.
708  Returns a tuple with the dictionary of digested information and the
709  position of the first line after the summary.
710  """
711  result = {}
712  i = pos + 1 # first line is a sequence of '*'
713  count = len(lines)
714 
715  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
716  def parseblock(ll):
717  r = {}
718  cols = splitcols(ll[0])
719  r["Name"], r["Title"] = cols[1:]
720 
721  cols = splitcols(ll[1])
722  r["Entries"] = int(cols[1])
723 
724  sizes = cols[2].split()
725  r["Total size"] = int(sizes[2])
726  if sizes[-1] == "memory":
727  r["File size"] = 0
728  else:
729  r["File size"] = int(sizes[-1])
730 
731  cols = splitcols(ll[2])
732  sizes = cols[2].split()
733  if cols[0] == "Baskets":
734  r["Baskets"] = int(cols[1])
735  r["Basket size"] = int(sizes[2])
736  r["Compression"] = float(sizes[-1])
737  return r
738 
739  if i < (count - 3) and lines[i].startswith("*Tree"):
740  result = parseblock(lines[i:i+3])
741  result["Branches"] = {}
742  i += 4
743  while i < (count - 3) and lines[i].startswith("*Br"):
744  if i < (count - 2) and lines[i].startswith("*Branch "):
745  # skip branch header
746  i += 3
747  continue
748  branch = parseblock(lines[i:i+3])
749  result["Branches"][branch["Name"]] = branch
750  i += 4
751 
752  return (result, i)
753 
754 def findTTreeSummaries(stdout):
755  """
756  Scan stdout to find ROOT TTree summaries and digest them.
757  """
758  stars = re.compile(r"^\*+$")
759  outlines = stdout.splitlines()
760  nlines = len(outlines)
761  trees = {}
762 
763  i = 0
764  while i < nlines: #loop over the output
765  # look for the start of a TTree summary block (a line made only of '*')
766  while i < nlines and not stars.match(outlines[i]):
767  i += 1
768  if i < nlines:
769  tree, i = _parseTTreeSummary(outlines, i)
770  if tree:
771  trees[tree["Name"]] = tree
772 
773  return trees
774 
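# Example (illustrative sketch, not part of the original file): the returned
# dictionary is keyed by tree name, e.g.
#
#   trees = findTTreeSummaries(stdout)
#   # trees -> {'MyTree': {'Name': 'MyTree', 'Title': ..., 'Entries': ...,
#   #                      'Branches': {...}, ...}}
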
775 def cmpTreesDicts(reference, to_check, ignore = None):
776  """
777  Check that all the keys in reference are in to_check too, with the same value.
778  If the value is a dict, the function is called recursively. to_check can
779  contain more keys than reference; the extra ones are not tested.
780  The function returns at the first difference found.
781  """
782  fail_keys = []
783  # filter the keys in the reference dictionary
784  if ignore:
785  ignore_re = re.compile(ignore)
786  keys = [ key for key in reference if not ignore_re.match(key) ]
787  else:
788  keys = reference.keys()
789  # loop over the keys (not ignored) in the reference dictionary
790  for k in keys:
791  if k in to_check: # the key must be in the dictionary to_check
792  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
793  # if both reference and to_check values are dictionaries, recurse
794  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
795  else:
796  # compare the two values
797  failed = to_check[k] != reference[k]
798  else: # handle missing keys in the dictionary to check (i.e. failure)
799  to_check[k] = None
800  failed = True
801  if failed:
802  fail_keys.insert(0, k)
803  break # exit from the loop at the first failure
804  return fail_keys # return the list of keys leading to the differing values
805 
806 def getCmpFailingValues(reference, to_check, fail_path):
807  c = to_check
808  r = reference
809  for k in fail_path:
810  c = c.get(k,None)
811  r = r.get(k,None)
812  if c is None or r is None:
813  break # one of the dictionaries is not deep enough
814  return (fail_path, r, c)
815 
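# Example (illustrative sketch, not part of the original file; 'reference_file' is
# hypothetical): compare summaries extracted from a reference and from the output.
#
#   expected = findTTreeSummaries(open(reference_file).read())
#   found = findTTreeSummaries(stdout)
#   failed = cmpTreesDicts(expected, found, ignore=r"Basket|.*size|Compression")
#   if failed:
#       print "mismatch at %s: %s != %s" % getCmpFailingValues(expected, found, failed)
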
816 # signature of the print-out of the histograms
817 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
818 
819 def parseHistosSummary(lines, pos):
820  """
821  Extract the histogram info from the lines starting at pos.
822  Returns the position of the first line after the summary block.
823  """
824  global h_count_re
825  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
826  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
827 
828  nlines = len(lines)
829 
830  # decode header
831  m = h_count_re.search(lines[pos])
832  name = m.group(1).strip()
833  total = int(m.group(2))
834  header = {}
835  for k, v in [ x.split("=") for x in m.group(3).split() ]:
836  header[k] = int(v)
837  pos += 1
838  header["Total"] = total
839 
840  summ = {}
841  while pos < nlines:
842  m = h_table_head.search(lines[pos])
843  if m:
844  t, d = m.groups(1) # type and directory
845  t = t.replace(" profile", "Prof")
846  pos += 1
847  if pos < nlines:
848  l = lines[pos]
849  else:
850  l = ""
851  cont = {}
852  if l.startswith(" | ID"):
853  # table format
854  titles = [ x.strip() for x in l.split("|")][1:]
855  pos += 1
856  while pos < nlines and lines[pos].startswith(" |"):
857  l = lines[pos]
858  values = [ x.strip() for x in l.split("|")][1:]
859  hcont = {}
860  for i in range(len(titles)):
861  hcont[titles[i]] = values[i]
862  cont[hcont["ID"]] = hcont
863  pos += 1
864  elif l.startswith(" ID="):
865  while pos < nlines and lines[pos].startswith(" ID="):
866  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
867  cont[values[0]] = values
868  pos += 1
869  else: # not interpreted
870  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
871  if not d in summ:
872  summ[d] = {}
873  summ[d][t] = cont
874  summ[d]["header"] = header
875  else:
876  break
877  if not summ:
878  # If the full table is not present, we use only the header
879  summ[name] = {"header": header}
880  return summ, pos
881 
882 def findHistosSummaries(stdout):
883  """
884  Scan stdout to find histogram summaries and digest them.
885  """
886  outlines = stdout.splitlines()
887  nlines = len(outlines) - 1
888  summaries = {}
889  global h_count_re
890 
891  pos = 0
892  while pos < nlines:
893  summ = {}
894  # find first line of block:
895  match = h_count_re.search(outlines[pos])
896  while pos < nlines and not match:
897  pos += 1
898  match = h_count_re.search(outlines[pos])
899  if match:
900  summ, pos = parseHistosSummary(outlines, pos)
901  summaries.update(summ)
902  return summaries
903 
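# Example (illustrative sketch, not part of the original file; 'expected_histos'
# would come from a reference file): histogram summaries are nested dictionaries
# and can be compared with the same helpers used for TTrees.
#
#   histos = findHistosSummaries(stdout)
#   failed = cmpTreesDicts(expected_histos, histos)
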
904 class GaudiFilterExecutable(qm.executable.Filter):
905  def __init__(self, input, timeout = -1):
906  """Create a new 'Filter'.
907 
908  'input' -- The string containing the input to provide to the
909  child process.
910 
911  'timeout' -- As for 'TimeoutExecutable.__init__'."""
912 
913  super(GaudiFilterExecutable, self).__init__(input, timeout)
914  self.__input = input
915  self.__timeout = timeout
916  self.stack_trace_file = None
917  # Temporary file to pass the stack trace from one process to the other
918  # The file must be closed and reopened when needed to avoid conflicts
919  # between the processes
920  tmpf = tempfile.mkstemp()
921  os.close(tmpf[0])
922  self.stack_trace_file = tmpf[1] # remember only the name
923 
924  def __UseSeparateProcessGroupForChild(self):
925  """Copied from TimeoutExecutable to allow the re-implementation of
926  _HandleChild.
927  """
928  if sys.platform == "win32":
929  # In Windows 2000 (or later), we should use "jobs" by
930  # analogy with UNIX process groups. However, that
931  # functionality is not (yet) provided by the Python Win32
932  # extensions.
933  return 0
934 
935  return self.__timeout >= 0 or self.__timeout == -2
936  ##
937  # Needed to replace the implementations from RedirectedExecutable and TimeoutExecutable
938  def _HandleChild(self):
939  """Code copied from both FilterExecutable and TimeoutExecutable.
940  """
941  # Close the pipe ends that we do not need.
942  if self._stdin_pipe:
943  self._ClosePipeEnd(self._stdin_pipe[0])
944  if self._stdout_pipe:
945  self._ClosePipeEnd(self._stdout_pipe[1])
946  if self._stderr_pipe:
947  self._ClosePipeEnd(self._stderr_pipe[1])
948 
949  # The pipes created by 'RedirectedExecutable' must be closed
950  # before the monitor process (created by 'TimeoutExecutable')
951  # is created. Otherwise, if the child process dies, 'select'
952  # in the parent will not return while the monitor process
953  # still has one of the file descriptors open.
954 
955  super(qm.executable.TimeoutExecutable, self)._HandleChild()
956 
957  if self.__UseSeparateProcessGroupForChild():
958  # Put the child into its own process group. This step is
959  # performed in both the parent and the child; therefore both
960  # processes can safely assume that the creation of the process
961  # group has taken place.
962  child_pid = self._GetChildPID()
963  try:
964  os.setpgid(child_pid, child_pid)
965  except:
966  # The call to setpgid may fail if the child has exited,
967  # or has already called 'exec'. In that case, we are
968  # guaranteed that the child has already put itself in the
969  # desired process group.
970  pass
971  # Create the monitoring process.
972  #
973  # If the monitoring process is in parent's process group and
974  # kills the child after waitpid has returned in the parent, we
975  # may end up trying to kill a process group other than the one
976  # that we intend to kill. Therefore, we put the monitoring
977  # process in the same process group as the child; that ensures
978  # that the process group will persist until the monitoring
979  # process kills it.
980  self.__monitor_pid = os.fork()
981  if self.__monitor_pid != 0:
982  # Make sure that the monitoring process is placed into the
983  # child's process group before the parent process calls
984  # 'waitpid'. In this way, we are guaranteed that the monitoring
985  # process is in the same process group as the child.
986  os.setpgid(self.__monitor_pid, child_pid)
987  else:
988  # Put the monitoring process into the child's process
989  # group. We know the process group still exists at
990  # this point because either (a) we are in the process
991  # group, or (b) the parent has not yet called waitpid.
992  os.setpgid(0, child_pid)
993 
994  # Close all open file descriptors. They are not needed
995  # in the monitor process. Furthermore, when the parent
996  # closes the write end of the stdin pipe to the child,
997  # we do not want the pipe to remain open; leaving the
998  # pipe open in the monitor process might cause the child
999  # to block waiting for additional input.
1000  try:
1001  max_fds = os.sysconf("SC_OPEN_MAX")
1002  except:
1003  max_fds = 256
1004  for fd in xrange(max_fds):
1005  try:
1006  os.close(fd)
1007  except:
1008  pass
1009  try:
1010  if self.__timeout >= 0:
1011  # Give the child time to run.
1012  time.sleep (self.__timeout)
1013  #######################################################
1014  ### This is the interesting part: dump the stack trace to a file
1015  if sys.platform == "linux2": # we should have /proc and gdb
1016  cmd = ["gdb",
1017  os.path.join("/proc", str(child_pid), "exe"),
1018  str(child_pid),
1019  "-batch", "-n", "-x",
1020  "'%s'" % os.path.join(os.path.dirname(__file__), "stack-trace.gdb")]
1021  # FIXME: I wanted to use subprocess.Popen, but it doesn't want to work
1022  # in this context.
1023  o = os.popen(" ".join(cmd)).read()
1024  open(self.stack_trace_file,"w").write(o)
1025  #######################################################
1026 
1027  # Kill all processes in the child process group.
1028  os.kill(0, signal.SIGKILL)
1029  else:
1030  # This call to select will never terminate.
1031  select.select ([], [], [])
1032  finally:
1033  # Exit. This code is in a finally clause so that
1034  # we are guaranteed to get here no matter what.
1035  os._exit(0)
1036  elif self.__timeout >= 0 and sys.platform == "win32":
1037  # Create a monitoring thread.
1038  self.__monitor_thread = Thread(target = self.__Monitor)
1039  self.__monitor_thread.start()
1040 
1041  if sys.platform == "win32":
1042 
1043  def __Monitor(self):
1044  """Code copied from FilterExecutable.
1045  Kill the child if the timeout expires.
1046 
1047  This function is run in the monitoring thread."""
1048 
1049  # The timeout may be expressed as a floating-point value
1050  # on UNIX, but it must be an integer number of
1051  # milliseconds when passed to WaitForSingleObject.
1052  timeout = int(self.__timeout * 1000)
1053  # Wait for the child process to terminate or for the
1054  # timer to expire.
1055  result = win32event.WaitForSingleObject(self._GetChildPID(),
1056  timeout)
1057  # If the timeout occurred, kill the child process.
1058  if result == win32con.WAIT_TIMEOUT:
1059  self.Kill()
1060 
1061 ########################################################################
1062 # Test Classes
1063 ########################################################################
1064 class GaudiExeTest(ExecTestBase):
1065  """Standard Gaudi test.
1066  """
1067  arguments = [
1068  qm.fields.TextField(
1069  name="program",
1070  title="Program",
1071  not_empty_text=1,
1072  description="""The path to the program.
1073 
1074  This field indicates the path to the program. If it is not
1075  an absolute path, the value of the 'PATH' environment
1076  variable will be used to search for the program.
1077  If not specified, $GAUDIEXE or Gaudi.exe are used.
1078  """
1079  ),
1080  qm.fields.SetField(qm.fields.TextField(
1081  name="args",
1082  title="Argument List",
1083  description="""The command-line arguments.
1084 
1085  If this field is left blank, the program is run without any
1086  arguments.
1087 
1088  Use this field to specify the option files.
1089 
1090  An implicit 0th argument (the path to the program) is added
1091  automatically."""
1092  )),
1093  qm.fields.TextField(
1094  name="options",
1095  title="Options",
1096  description="""Options to be passed to the application.
1097 
1098  This field allows one to pass a list of options to the main program
1099  without the need for a separate option file.
1100 
1101  The content of the field is written to a temporary file whose name
1102  is passed to the application as the last argument (appended to the
1103  field "Argument List").
1104  """,
1105  verbatim="true",
1106  multiline="true",
1107  default_value=""
1108  ),
1109  qm.fields.TextField(
1110  name="workdir",
1111  title="Working Directory",
1112  description="""Path to the working directory.
1113 
1114  If this field is left blank, the program will be run from the qmtest
1115  directory, otherwise from the directory specified.""",
1116  default_value=""
1117  ),
1118  qm.fields.TextField(
1119  name="reference",
1120  title="Reference Output",
1121  description="""Path to the file containing the reference output.
1122 
1123  If this field is left blank, any standard output will be considered
1124  valid.
1125 
1126  If the reference file is specified, any output on standard error is
1127  ignored."""
1128  ),
1129  qm.fields.TextField(
1130  name="error_reference",
1131  title="Reference for standard error",
1132  description="""Path to the file containing the reference for the standard error.
1133 
1134  If this field is left blank, any standard error will be considered
1135  valid.
1136 
1137  If the reference file is specified, the standard error is compared
1138  against it."""
1139  ),
1140  qm.fields.SetField(qm.fields.TextField(
1141  name = "unsupported_platforms",
1142  title = "Unsupported Platforms",
1143  description = """Platforms on which the test must not be run.
1144 
1145  List of regular expressions identifying the platforms on which the
1146  test is not run and the result is set to UNTESTED."""
1147  )),
1148 
1149  qm.fields.TextField(
1150  name = "validator",
1151  title = "Validator",
1152  description = """Function to validate the output of the test.
1153 
1154  If defined, the function is used to validate the products of the
1155  test.
1156  The function is called passing as arguments:
1157  self: the test class instance
1158  stdout: the standard output of the executed test
1159  stderr: the standard error of the executed test
1160  result: the Result objects to fill with messages
1161  The function must return a list of causes for the failure.
1162  If specified, overrides standard output, standard error and
1163  reference files.
1164  """,
1165  verbatim="true",
1166  multiline="true",
1167  default_value=""
1168  ),
1169 
1170  qm.fields.BooleanField(
1171  name = "use_temp_dir",
1172  title = "Use temporary directory",
1173  description = """Use temporary directory.
1174 
1175  If set to true, use a temporary directory as working directory.
1176  """,
1177  default_value="false"
1178  ),
1179 
1180  qm.fields.IntegerField(
1181  name = "signal",
1182  title = "Expected signal",
1183  description = """Expect termination by signal.""",
1184  default_value=None
1185  ),
1186  ]
1187 
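 # Example (illustrative sketch, not part of the original file; the expected text is
 # hypothetical): content of the 'validator' field of a .qmt test. It is executed by
 # ValidateOutput (below) with 'stdout', 'stderr', 'result', 'causes' and helper
 # wrappers such as countErrorLines and findReferenceBlock already defined.
 #
 #   countErrorLines({'ERROR': 0, 'FATAL': 0})
 #   findReferenceBlock("""
 #   EventLoopMgr         SUCCESS Event Loop Finished
 #   """)
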
1188  def PlatformIsNotSupported(self, context, result):
1189  platform = self.GetPlatform()
1190  unsupported = [ re.compile(x)
1191  for x in [ str(y).strip()
1192  for y in self.unsupported_platforms ]
1193  if x
1194  ]
1195  for p_re in unsupported:
1196  if p_re.search(platform):
1197  result.SetOutcome(result.UNTESTED)
1198  result[result.CAUSE] = 'Platform not supported.'
1199  return True
1200  return False
1201 
1202  def GetPlatform(self):
1203  """
1204  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1205  """
1206  arch = "None"
1207  # check architecture name
1208  if "CMTCONFIG" in os.environ:
1209  arch = os.environ["CMTCONFIG"]
1210  elif "SCRAM_ARCH" in os.environ:
1211  arch = os.environ["SCRAM_ARCH"]
1212  return arch
1213 
1214  def isWinPlatform(self):
1215  """
1216  Return True if the current platform is Windows.
1217 
1218  This function was needed because of the change in the CMTCONFIG format,
1219  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1220  """
1221  platform = self.GetPlatform()
1222  return "winxp" in platform or platform.startswith("win")
1223 
1224  def _expandReferenceFileName(self, reffile):
1225  # if no file is passed, do nothing
1226  if not reffile:
1227  return ""
1228 
1229  # function to split an extension into its constituent parts
1230  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
1231 
1232  reference = os.path.normpath(os.path.expandvars(reffile))
1233  # old-style platform-specific reference name
1234  spec_ref = reference[:-3] + self.GetPlatform()[0:3] + reference[-3:]
1235  if os.path.isfile(spec_ref):
1236  reference = spec_ref
1237  else: # look for new-style platform specific reference files:
1238  # get all the files whose name start with the reference filename
1239  dirname, basename = os.path.split(reference)
1240  if not dirname: dirname = '.'
1241  head = basename + "."
1242  head_len = len(head)
1243  platform = platformSplit(self.GetPlatform())
1244  candidates = []
1245  for f in os.listdir(dirname):
1246  if f.startswith(head):
1247  req_plat = platformSplit(f[head_len:])
1248  if platform.issuperset(req_plat):
1249  candidates.append( (len(req_plat), f) )
1250  if candidates: # take the one with highest matching
1251  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
1252  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
1253  candidates.sort()
1254  reference = os.path.join(dirname, candidates[-1][1])
1255  return reference
1256 
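 # Example (illustrative sketch, not part of the original file; file names and
 # CMTCONFIG value are hypothetical): with CMTCONFIG='x86_64-slc6-gcc48-opt' and
 # both 'test.ref' and 'test.ref.x86_64-gcc48' present on disk,
 #
 #   self._expandReferenceFileName('test.ref')   # -> picks 'test.ref.x86_64-gcc48',
 #                                               #    the best-matching platform file
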
1257  def CheckTTreesSummaries(self, stdout, result, causes,
1258  trees_dict = None,
1259  ignore = r"Basket|.*size|Compression"):
1260  """
1261  Compare the TTree summaries in stdout with the ones in trees_dict or in
1262  the reference file. By default ignore the size, compression and basket
1263  fields.
1264  The presence of TTree summaries when none is expected is not a failure.
1265  """
1266  if trees_dict is None:
1267  reference = self._expandReferenceFileName(self.reference)
1268  # call the validator if the file exists
1269  if reference and os.path.isfile(reference):
1270  trees_dict = findTTreeSummaries(open(reference).read())
1271  else:
1272  trees_dict = {}
1273 
1274  from pprint import PrettyPrinter
1275  pp = PrettyPrinter()
1276  if trees_dict:
1277  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
1278  if ignore:
1279  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
1280 
1281  trees = findTTreeSummaries(stdout)
1282  failed = cmpTreesDicts(trees_dict, trees, ignore)
1283  if failed:
1284  causes.append("trees summaries")
1285  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
1286  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
1287  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
1288 
1289  return causes
1290 
1291  def CheckHistosSummaries(self, stdout, result, causes,
1292  dict = None,
1293  ignore = None):
1294  """
1295  Compare the histogram summaries in stdout with the ones in 'dict' or in
1296  the reference file. By default no field is ignored.
1297  The presence of histogram summaries when none is expected is not a
1298  failure.
1299  """
1300  if dict is None:
1301  reference = self._expandReferenceFileName(self.reference)
1302  # call the validator if the file exists
1303  if reference and os.path.isfile(reference):
1304  dict = findHistosSummaries(open(reference).read())
1305  else:
1306  dict = {}
1307 
1308  from pprint import PrettyPrinter
1309  pp = PrettyPrinter()
1310  if dict:
1311  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
1312  if ignore:
1313  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
1314 
1315  histos = findHistosSummaries(stdout)
1316  failed = cmpTreesDicts(dict, histos, ignore)
1317  if failed:
1318  causes.append("histos summaries")
1319  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
1320  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
1321  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
1322 
1323  return causes
1324 
1325  def ValidateWithReference(self, stdout, stderr, result, causes, preproc = None):
1326  """
1327  Default validation action: compare standard output and error to the
1328  reference files.
1329  """
1330  # set the default output preprocessor
1331  if preproc is None:
1332  preproc = normalizeExamples
1333  # check standard output
1334  reference = self._expandReferenceFileName(self.reference)
1335  # call the validator if the file exists
1336  if reference and os.path.isfile(reference):
1337  result["GaudiTest.output_reference"] = reference
1338  causes += ReferenceFileValidator(reference,
1339  "standard output",
1340  "GaudiTest.output_diff",
1341  preproc = preproc)(stdout, result)
1342 
1343  # Compare TTree summaries
1344  causes = self.CheckTTreesSummaries(stdout, result, causes)
1345  causes = self.CheckHistosSummaries(stdout, result, causes)
1346 
1347  if causes: # Write a new reference file for stdout
1348  try:
1349  newref = open(reference + ".new","w")
1350  # sanitize newlines
1351  for l in stdout.splitlines():
1352  newref.write(l.rstrip() + '\n')
1353  del newref # flush and close
1354  except IOError:
1355  # Ignore IO errors when trying to update reference files
1356  # because we may be in a read-only filesystem
1357  pass
1358 
1359  # check standard error
1360  reference = self._expandReferenceFileName(self.error_reference)
1361  # call the validator if we have a file to use
1362  if reference and os.path.isfile(reference):
1363  result["GaudiTest.error_reference"] = reference
1364  newcauses = ReferenceFileValidator(reference,
1365  "standard error",
1366  "GaudiTest.error_diff",
1367  preproc = preproc)(stderr, result)
1368  causes += newcauses
1369  if newcauses: # Write a new reference file for stderr
1370  newref = open(reference + ".new","w")
1371  # sanitize newlines
1372  for l in stderr.splitlines():
1373  newref.write(l.rstrip() + '\n')
1374  del newref # flush and close
1375  else:
1376  causes += BasicOutputValidator(self.stderr,
1377  "standard error",
1378  "ExecTest.expected_stderr")(stderr, result)
1379 
1380  return causes
1381 
1382  def ValidateOutput(self, stdout, stderr, result):
1383  causes = []
1384  # if the test definition contains a custom validator, use it
1385  if self.validator.strip() != "":
1386  class CallWrapper(object):
1387  """
1388  Small wrapper class to dynamically bind some default arguments
1389  to a callable.
1390  """
1391  def __init__(self, callable, extra_args = {}):
1392  self.callable = callable
1393  self.extra_args = extra_args
1394  # get the list of names of positional arguments
1395  from inspect import getargspec
1396  self.args_order = getargspec(callable)[0]
1397  # Remove "self" from the list of positional arguments
1398  # since it is added automatically
1399  if self.args_order[0] == "self":
1400  del self.args_order[0]
1401  def __call__(self, *args, **kwargs):
1402  # Check which positional arguments are used
1403  positional = self.args_order[:len(args)]
1404 
1405  kwargs = dict(kwargs) # copy the arguments dictionary
1406  for a in self.extra_args:
1407  # use "extra_args" for the arguments not specified as
1408  # positional or keyword
1409  if a not in positional and a not in kwargs:
1410  kwargs[a] = self.extra_args[a]
1411  return apply(self.callable, args, kwargs)
1412  # local names to be exposed in the script
1413  exported_symbols = {"self":self,
1414  "stdout":stdout,
1415  "stderr":stderr,
1416  "result":result,
1417  "causes":causes,
1418  "findReferenceBlock":
1419  CallWrapper(findReferenceBlock, {"stdout":stdout,
1420  "result":result,
1421  "causes":causes}),
1422  "validateWithReference":
1423  CallWrapper(self.ValidateWithReference, {"stdout":stdout,
1424  "stderr":stderr,
1425  "result":result,
1426  "causes":causes}),
1427  "countErrorLines":
1428  CallWrapper(countErrorLines, {"stdout":stdout,
1429  "result":result,
1430  "causes":causes}),
1431  "checkTTreesSummaries":
1432  CallWrapper(self.CheckTTreesSummaries, {"stdout":stdout,
1433  "result":result,
1434  "causes":causes}),
1435  "checkHistosSummaries":
1436  CallWrapper(self.CheckHistosSummaries, {"stdout":stdout,
1437  "result":result,
1438  "causes":causes}),
1439 
1440  }
1441  exec self.validator in globals(), exported_symbols
1442  else:
1443  self.ValidateWithReference(stdout, stderr, result, causes)
1444 
1445  return causes
1446 
1447  def DumpEnvironment(self, result):
1448  """
1449  Add the content of the environment to the result object.
1450 
1451  Copied from the QMTest class of COOL.
1452  """
1453  vars = os.environ.keys()
1454  vars.sort()
1455  result['GaudiTest.environment'] = \
1456  result.Quote('\n'.join(["%s=%s"%(v,os.environ[v]) for v in vars]))
1457 
1458  def Run(self, context, result):
1459  """Run the test.
1460 
1461  'context' -- A 'Context' giving run-time parameters to the
1462  test.
1463 
1464  'result' -- A 'Result' object. The outcome will be
1465  'Result.PASS' when this method is called. The 'result' may be
1466  modified by this method to indicate outcomes other than
1467  'Result.PASS' or to add annotations."""
1468 
1469  # Check if the platform is supported
1470  if self.PlatformIsNotSupported(context, result):
1471  return
1472 
1473  # Prepare program name and arguments (expanding variables, and converting to absolute)
1474  if self.program:
1475  prog = rationalizepath(self.program)
1476  elif "GAUDIEXE" in os.environ:
1477  prog = os.environ["GAUDIEXE"]
1478  else:
1479  prog = "Gaudi.exe"
1480  self.program = prog
1481 
1482  dummy, prog_ext = os.path.splitext(prog)
1483  if prog_ext not in [ ".exe", ".py", ".bat" ] and self.isWinPlatform():
1484  prog += ".exe"
1485  prog_ext = ".exe"
1486 
1487  prog = which(prog) or prog
1488 
1489  # Convert paths to absolute paths in arguments and reference files
1490  args = map(rationalizepath, self.args)
1491  self.reference = rationalizepath(self.reference)
1492  self.error_reference = rationalizepath(self.error_reference)
1493 
1494 
1495  # check if the user provided inline options
1496  tmpfile = None
1497  if self.options.strip():
1498  ext = ".opts"
1499  if re.search(r"from\s+Gaudi.Configuration\s+import\s+\*|from\s+Configurables\s+import", self.options):
1500  ext = ".py"
1501  tmpfile = TempFile(ext)
1502  tmpfile.writelines("\n".join(self.options.splitlines()))
1503  tmpfile.flush()
1504  args.append(tmpfile.name)
1505  result["GaudiTest.options"] = result.Quote(self.options)
1506 
1507  # if the program is a python file, execute it through python
1508  if prog_ext == ".py":
1509  args.insert(0,prog)
1510  if self.isWinPlatform():
1511  prog = which("python.exe") or "python.exe"
1512  else:
1513  prog = which("python") or "python"
1514 
1515  # Change to the working directory if specified, or to the default temporary directory
1516  origdir = os.getcwd()
1517  if self.workdir:
1518  os.chdir(str(os.path.normpath(os.path.expandvars(self.workdir))))
1519  elif self.use_temp_dir == "true":
1520  if "QMTEST_TMPDIR" in os.environ:
1521  qmtest_tmpdir = os.environ["QMTEST_TMPDIR"]
1522  if not os.path.exists(qmtest_tmpdir):
1523  os.makedirs(qmtest_tmpdir)
1524  os.chdir(qmtest_tmpdir)
1525  elif "qmtest.tmpdir" in context:
1526  os.chdir(context["qmtest.tmpdir"])
1527 
1528  if "QMTEST_IGNORE_TIMEOUT" not in os.environ:
1529  self.timeout = max(self.timeout,600)
1530  else:
1531  self.timeout = -1
1532 
1533  try:
1534  # Generate eclipse.org debug launcher for the test
1535  self._CreateEclipseLaunch(prog, args, destdir = os.path.join(origdir, '.eclipse'))
1536  # Run the test
1537  self.RunProgram(prog,
1538  [ prog ] + args,
1539  context, result)
1540  # Record the content of the environment for failing tests
1541  if result.GetOutcome() not in [ result.PASS ]:
1542  self.DumpEnvironment(result)
1543  finally:
1544  # revert to the original directory
1545  os.chdir(origdir)
1546 
1547  def RunProgram(self, program, arguments, context, result):
1548  """Run the 'program'.
1549 
1550  'program' -- The path to the program to run.
1551 
1552  'arguments' -- A list of the arguments to the program. This
1553  list must contain a first argument corresponding to 'argv[0]'.
1554 
1555  'context' -- A 'Context' giving run-time parameters to the
1556  test.
1557 
1558  'result' -- A 'Result' object. The outcome will be
1559  'Result.PASS' when this method is called. The 'result' may be
1560  modified by this method to indicate outcomes other than
1561  'Result.PASS' or to add annotations.
1562 
1563  @attention: This method has been copied from command.ExecTestBase
1564  (QMTest 2.3.0) and modified to keep stdout and stderr
1565  for tests that have been terminated by a signal.
1566  (Fundamental for debugging in the Application Area)
1567  """
1568 
1569  # Construct the environment.
1570  environment = self.MakeEnvironment(context)
1571  # FIXME: without this, we get some spurious '\x1b[?1034' in the stdout on SLC6
1572  if "slc6" in environment.get('CMTCONFIG', ''):
1573  environment['TERM'] = 'dumb'
1574  # Create the executable.
1575  if self.timeout >= 0:
1576  timeout = self.timeout
1577  else:
1578  # If no timeout was specified, we still run this process in a
1579  # separate process group and kill the entire process group
1580  # when the child is done executing. That means that
1581  # orphaned child processes created by the test will be
1582  # cleaned up.
1583  timeout = -2
1584  e = GaudiFilterExecutable(self.stdin, timeout)
1585  # Run it.
1586  exit_status = e.Run(arguments, environment, path = program)
1587  # Get the stack trace from the temporary file (if present)
1588  if e.stack_trace_file and os.path.exists(e.stack_trace_file):
1589  stack_trace = open(e.stack_trace_file).read()
1590  os.remove(e.stack_trace_file)
1591  else:
1592  stack_trace = None
1593  if stack_trace:
1594  result["ExecTest.stack_trace"] = result.Quote(stack_trace)
1595 
1596  # If the process terminated normally, check the outputs.
1597  if (sys.platform == "win32" or os.WIFEXITED(exit_status)
1598  or self.signal == os.WTERMSIG(exit_status)):
1599  # There are no causes of failure yet.
1600  causes = []
1601  # The target program terminated normally. Extract the
1602  # exit code, if this test checks it.
1603  if self.exit_code is None:
1604  exit_code = None
1605  elif sys.platform == "win32":
1606  exit_code = exit_status
1607  else:
1608  exit_code = os.WEXITSTATUS(exit_status)
1609  # Get the output generated by the program.
1610  stdout = e.stdout
1611  stderr = e.stderr
1612  # Record the results.
1613  result["ExecTest.exit_code"] = str(exit_code)
1614  result["ExecTest.stdout"] = result.Quote(stdout)
1615  result["ExecTest.stderr"] = result.Quote(stderr)
1616  # Check to see if the exit code matches.
1617  if exit_code != self.exit_code:
1618  causes.append("exit_code")
1619  result["ExecTest.expected_exit_code"] \
1620  = str(self.exit_code)
1621  # Validate the output.
1622  causes += self.ValidateOutput(stdout, stderr, result)
1623  # If anything went wrong, the test failed.
1624  if causes:
1625  result.Fail("Unexpected %s." % string.join(causes, ", "))
1626  elif os.WIFSIGNALED(exit_status):
1627  # The target program terminated with a signal. Construe
1628  # that as a test failure.
1629  signal_number = str(os.WTERMSIG(exit_status))
1630  if not stack_trace:
1631  result.Fail("Program terminated by signal.")
1632  else:
1633  # The presence of stack_trace means that we stopped the job because
1634  # of a time-out
1635  result.Fail("Exceeded time limit (%ds), terminated." % timeout)
1636  result["ExecTest.signal_number"] = signal_number
1637  result["ExecTest.stdout"] = result.Quote(e.stdout)
1638  result["ExecTest.stderr"] = result.Quote(e.stderr)
1639  if self.signal:
1640  result["ExecTest.expected_signal_number"] = str(self.signal)
1641  elif os.WIFSTOPPED(exit_status):
1642  # The target program was stopped. Construe that as a
1643  # test failure.
1644  signal_number = str(os.WSTOPSIG(exit_status))
1645  if not stack_trace:
1646  result.Fail("Program stopped by signal.")
1647  else:
1648  # The presence of stack_trace means that we stopped the job because
1649  # of a time-out
1650  result.Fail("Exceeded time limit (%ds), stopped." % timeout)
1651  result["ExecTest.signal_number"] = signal_number
1652  result["ExecTest.stdout"] = result.Quote(e.stdout)
1653  result["ExecTest.stderr"] = result.Quote(e.stderr)
1654  else:
1655  # The target program terminated abnormally in some other
1656  # manner. (This shouldn't normally happen...)
1657  result.Fail("Program did not terminate normally.")
1658 
1659  # Marco Cl.: This is a special trick to fix a "problem" with the output
1660  # of gaudi jobs when they use colors
1661  esc = '\x1b'
1662  repr_esc = '\\x1b'
1663  result["ExecTest.stdout"] = result["ExecTest.stdout"].replace(esc,repr_esc)
1664  # TODO: (MCl) improve the hack for colors in standard output
1665  # maybe by converting them to HTML tags
1666 
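# A minimal sketch of the wait-status decoding used in RunProgram above,
# assuming POSIX semantics (os.system returns the same kind of encoded
# status as os.wait); the command string is just an example:
#
#   import os
#   status = os.system("exit 3")         # encoded wait status
#   if os.WIFEXITED(status):
#       print os.WEXITSTATUS(status)     # -> 3, normal termination
#   elif os.WIFSIGNALED(status):
#       print os.WTERMSIG(status)        # number of the signal that killed the child
#   elif os.WIFSTOPPED(status):
#       print os.WSTOPSIG(status)        # number of the signal that stopped the child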
1667  def _CreateEclipseLaunch(self, prog, args, destdir = None):
1668  if 'NO_ECLIPSE_LAUNCHERS' in os.environ:
1669  # do not generate eclipse launchers if the user asks so
1670  return
1671  # Find the project name used in Eclipse.
1672  # The name is in a file called ".project" in one of the parent directories
1673  projbasedir = os.path.normpath(destdir)
1674  while not os.path.exists(os.path.join(projbasedir, ".project")):
1675  oldprojdir = projbasedir
1676  projbasedir = os.path.normpath(os.path.join(projbasedir, os.pardir))
1677  # FIXME: the root level is invariant when trying to go up one level,
1678  # but it must be checked on Windows
1679  if oldprojdir == projbasedir:
1680  # If we cannot find a .project file, there is no point in creating a .launch file
1681  return
1682  # Ensure that we have a place where to write.
1683  if not os.path.exists(destdir):
1684  os.makedirs(destdir)
1685  # Use ElementTree to parse the XML file
1686  from xml.etree import ElementTree as ET
1687  t = ET.parse(os.path.join(projbasedir, ".project"))
1688  projectName = t.find("name").text
1689 
1690  # prepare the name/path of the generated file
1691  destfile = "%s.launch" % self._Runnable__id
1692  if destdir:
1693  destfile = os.path.join(destdir, destfile)
1694 
1695  if self.options.strip():
1696  # this means we have some custom options in the .qmt file, so we have
1697  # to copy them from the temporary file at the end of the argument list
1698  # into another file
1699  tempfile = args.pop()
1700  optsfile = destfile + os.path.splitext(tempfile)[1]
1701  shutil.copyfile(tempfile, optsfile)
1702  args.append(optsfile)
1703 
1704  # prepare the data to insert in the XML file
1705  from xml.sax.saxutils import quoteattr # useful to quote XML special chars
1706  data = {}
1707  # Note: the "quoteattr(k)" is not needed because special chars cannot be part of a variable name,
1708  # but it does no harm.
1709  data["environment"] = "\n".join(['<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
1710  for k, v in os.environ.iteritems()
1711  if k not in ('MAKEOVERRIDES', 'MAKEFLAGS', 'MAKELEVEL')])
1712 
1713  data["exec"] = which(prog) or prog
1714  if os.path.basename(data["exec"]).lower().startswith("python"):
1715  data["stopAtMain"] = "false" # do not stop at main when debugging Python scripts
1716  else:
1717  data["stopAtMain"] = "true"
1718 
1719  data["args"] = "&#10;".join(map(rationalizepath, args))
1720  if self.isWinPlatform():
1721  data["args"] = "&#10;".join(["/debugexe"] + map(rationalizepath, [data["exec"]] + args))
1722  data["exec"] = which("vcexpress.exe")
1723 
1724  if not self.use_temp_dir:
1725  data["workdir"] = os.getcwd()
1726  else:
1727  # If the test is using a temporary directory, it is better to run it
1728  # in the same directory as the .launch file when debugged in eclipse
1729  data["workdir"] = destdir
1730 
1731  data["project"] = projectName.strip()
1732 
1733  # Template for the XML file, based on eclipse 3.4
1734  xml_template = u"""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
1735 <launchConfiguration type="org.eclipse.cdt.launch.applicationLaunchType">
1736 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB" value="true"/>
1737 <listAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB_LIST"/>
1738 <stringAttribute key="org.eclipse.cdt.debug.mi.core.DEBUG_NAME" value="gdb"/>
1739 <stringAttribute key="org.eclipse.cdt.debug.mi.core.GDB_INIT" value=".gdbinit"/>
1740 <listAttribute key="org.eclipse.cdt.debug.mi.core.SOLIB_PATH"/>
1741 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.STOP_ON_SOLIB_EVENTS" value="false"/>
1742 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.breakpointsFullPath" value="false"/>
1743 <stringAttribute key="org.eclipse.cdt.debug.mi.core.commandFactory" value="org.eclipse.cdt.debug.mi.core.standardCommandFactory"/>
1744 <stringAttribute key="org.eclipse.cdt.debug.mi.core.protocol" value="mi"/>
1745 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.verboseMode" value="false"/>
1746 <intAttribute key="org.eclipse.cdt.launch.ATTR_BUILD_BEFORE_LAUNCH_ATTR" value="0"/>
1747 <stringAttribute key="org.eclipse.cdt.launch.COREFILE_PATH" value=""/>
1748 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_ID" value="org.eclipse.cdt.debug.mi.core.CDebuggerNew"/>
1749 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_REGISTER_GROUPS" value=""/>
1750 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_START_MODE" value="run"/>
1751 <booleanAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN" value="%(stopAtMain)s"/>
1752 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN_SYMBOL" value="main"/>
1753 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_REGISTER_BOOKKEEPING" value="false"/>
1754 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_VARIABLE_BOOKKEEPING" value="false"/>
1755 <stringAttribute key="org.eclipse.cdt.launch.FORMAT" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&lt;contentList/&gt;"/>
1756 <stringAttribute key="org.eclipse.cdt.launch.GLOBAL_VARIABLES" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;globalVariableList/&gt;&#10;"/>
1757 <stringAttribute key="org.eclipse.cdt.launch.MEMORY_BLOCKS" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;memoryBlockExpressionList/&gt;&#10;"/>
1758 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_ARGUMENTS" value="%(args)s"/>
1759 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_NAME" value="%(exec)s"/>
1760 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_ATTR" value="%(project)s"/>
1761 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_BUILD_CONFIG_ID_ATTR" value=""/>
1762 <stringAttribute key="org.eclipse.cdt.launch.WORKING_DIRECTORY" value="%(workdir)s"/>
1763 <booleanAttribute key="org.eclipse.cdt.launch.ui.ApplicationCDebuggerTab.DEFAULTS_SET" value="true"/>
1764 <booleanAttribute key="org.eclipse.cdt.launch.use_terminal" value="true"/>
1765 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
1766 <listEntry value="/%(project)s"/>
1767 </listAttribute>
1768 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
1769 <listEntry value="4"/>
1770 </listAttribute>
1771 <booleanAttribute key="org.eclipse.debug.core.appendEnvironmentVariables" value="false"/>
1772 <mapAttribute key="org.eclipse.debug.core.environmentVariables">
1773 %(environment)s
1774 </mapAttribute>
1775 <mapAttribute key="org.eclipse.debug.core.preferred_launchers">
1776 <mapEntry key="[debug]" value="org.eclipse.cdt.cdi.launch.localCLaunch"/>
1777 </mapAttribute>
1778 <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
1779 <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
1780 </listAttribute>
1781 </launchConfiguration>
1782 """
1783  try:
1784  # ensure the correct encoding of data values
1785  for k in data:
1786  data[k] = codecs.decode(data[k], 'utf-8')
1787  xml = xml_template % data
1788 
1789  # Write the output file
1790  codecs.open(destfile, "w", encoding='utf-8').write(xml)
1791  except:
1792  print 'WARNING: problem generating Eclipse launcher'
1793 
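# A short illustration of the environment serialization used in
# _CreateEclipseLaunch above: xml.sax.saxutils.quoteattr quotes the value
# and escapes XML special characters, so each pair becomes a valid
# attribute (the variable name and value are made up for the example):
#
#   from xml.sax.saxutils import quoteattr
#   print '<mapEntry key=%s value=%s/>' % (quoteattr('MYVAR'), quoteattr('a<b&"c"'))
#   # -> <mapEntry key="MYVAR" value='a&lt;b&amp;"c"'/>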
1794 
1795 try:
1796  import json
1797 except ImportError:
1798  # Use simplejson for LCG
1799  import simplejson as json
1800 
1801 class HTMLResultStream(ResultStream):
1802  """An 'HTMLResultStream' writes its output to a set of HTML files.
1803 
1804  The argument 'dir' is used to select the destination directory for the HTML
1805  report.
1806  The destination directory may already contain the report from a previous run
1807  (for example of a different package), in which case it will be extended to
1808  include the new data.
1809  """
1810  arguments = [
1811  qm.fields.TextField(
1812  name = "dir",
1813  title = "Destination Directory",
1814  description = """The name of the directory.
1815 
1816  All results will be written to the directory indicated.""",
1817  verbatim = "true",
1818  default_value = ""),
1819  ]
1820 
1821  def __init__(self, arguments = None, **args):
1822  """Prepare the destination directory.
1823 
1824  Creates the destination directory and stores in it some preliminary
1825  annotations and the static files found in the template directory
1826  'html_report'.
1827  """
1828  ResultStream.__init__(self, arguments, **args)
1829  self._summary = []
1830  self._summaryFile = os.path.join(self.dir, "summary.json")
1831  self._annotationsFile = os.path.join(self.dir, "annotations.json")
1832  # Prepare the destination directory using the template
1833  templateDir = os.path.join(os.path.dirname(__file__), "html_report")
1834  if not os.path.isdir(self.dir):
1835  os.makedirs(self.dir)
1836  # Copy the files in the template directory excluding the directories
1837  for f in os.listdir(templateDir):
1838  src = os.path.join(templateDir, f)
1839  dst = os.path.join(self.dir, f)
1840  if not os.path.isdir(src) and not os.path.exists(dst):
1841  shutil.copy(src, dst)
1842  # Add some non-QMTest attributes
1843  if "CMTCONFIG" in os.environ:
1844  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
1845  import socket
1846  self.WriteAnnotation("hostname", socket.gethostname())
1847 
1848  def _updateSummary(self):
1849  """Helper function to extend the global summary file in the destination
1850  directory.
1851  """
1852  if os.path.exists(self._summaryFile):
1853  oldSummary = json.load(open(self._summaryFile))
1854  else:
1855  oldSummary = []
1856  ids = set([ i["id"] for i in self._summary ])
1857  newSummary = [ i for i in oldSummary if i["id"] not in ids ]
1858  newSummary.extend(self._summary)
1859  json.dump(newSummary, open(self._summaryFile, "w"),
1860  sort_keys = True)
1861 
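# A small sketch of the merge rule implemented in _updateSummary above:
# entries whose "id" appears in the current run replace the corresponding
# entries of a previous run, everything else is preserved (the ids and
# outcomes below are made up):
#
#   old = [{"id": "Pkg.test_a", "outcome": "FAIL"},
#          {"id": "Pkg.test_b", "outcome": "PASS"}]
#   new = [{"id": "Pkg.test_a", "outcome": "PASS"}]
#   ids = set(i["id"] for i in new)
#   merged = [i for i in old if i["id"] not in ids] + new
#   # -> test_b is kept from the old summary, test_a is replaced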
1862  def WriteAnnotation(self, key, value):
1863  """Writes the annotation to the annotation file.
1864  If the key is already present with a different value, the value becomes
1865  a list and the new value is appended to it, except for start_time and
1866  end_time.
1867  """
1868  # Initialize the annotation dict from the file (if present)
1869  if os.path.exists(self._annotationsFile):
1870  annotations = json.load(open(self._annotationsFile))
1871  else:
1872  annotations = {}
1873  # hack because we do not have proper JSON support
1874  key, value = map(str, [key, value])
1875  if key == "qmtest.run.start_time":
1876  # Special handling of the start time:
1877  # if we are updating a result, we have to keep the original start
1878  # time, but remove the original end time to mark the report to be
1879  # in progress.
1880  if key not in annotations:
1881  annotations[key] = value
1882  if "qmtest.run.end_time" in annotations:
1883  del annotations["qmtest.run.end_time"]
1884  else:
1885  # All other annotations are added to a list
1886  if key in annotations:
1887  old = annotations[key]
1888  if type(old) is list:
1889  if value not in old:
1890  annotations[key].append(value)
1891  elif value != old:
1892  annotations[key] = [old, value]
1893  else:
1894  annotations[key] = value
1895  # Write the new annotations file
1896  json.dump(annotations, open(self._annotationsFile, "w"),
1897  sort_keys = True)
1898 
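# Example of the annotation merge rule above, with made-up values and
# 'stream' standing for an HTMLResultStream instance: writing the same key
# twice with different values turns it into a list, while
# "qmtest.run.start_time" keeps the first value and clears any end time:
#
#   stream.WriteAnnotation("cmt.cmtconfig", "x86_64-slc6-gcc48-opt")
#   stream.WriteAnnotation("cmt.cmtconfig", "x86_64-slc6-gcc48-dbg")
#   # annotations.json now contains
#   #   "cmt.cmtconfig": ["x86_64-slc6-gcc48-opt", "x86_64-slc6-gcc48-dbg"]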
1899  def WriteResult(self, result):
1900  """Prepare the test result directory in the destination directory storing
1901  into it the result fields.
1902  A summary of the test result is stored both in a file in the test directory
1903  and in the global summary file.
1904  """
1905  summary = {}
1906  summary["id"] = result.GetId()
1907  summary["outcome"] = result.GetOutcome()
1908  summary["cause"] = result.GetCause()
1909  summary["fields"] = result.keys()
1910  summary["fields"].sort()
1911 
1912  # Since we lack proper JSON support, coerce everything to plain strings
1913  for f in ["id", "outcome", "cause"]:
1914  summary[f] = str(summary[f])
1915  summary["fields"] = map(str, summary["fields"])
1916 
1917  self._summary.append(summary)
1918 
1919  # format:
1920  # testname/summary.json
1921  # testname/field1
1922  # testname/field2
1923  testOutDir = os.path.join(self.dir, summary["id"])
1924  if not os.path.isdir(testOutDir):
1925  os.makedirs(testOutDir)
1926  json.dump(summary, open(os.path.join(testOutDir, "summary.json"), "w"),
1927  sort_keys = True)
1928  for f in summary["fields"]:
1929  open(os.path.join(testOutDir, f), "w").write(result[f])
1930 
1931  self._updateSummary()
1932 
1933  def Summarize(self):
1934  # Not implemented.
1935  pass
1936 
1937 
1938 
1939 
1940 class XMLResultStream(ResultStream):
1941  """An 'XMLResultStream' writes its output to a Ctest XML file.
1942 
1943  The argument 'dir' is used to select the destination file for the XML
1944  report.
1945  The destination directory may already contain the report from a previous run
1946  (for example of a different package), in which case it will be overrided to
1947  with the new data.
1948  """
1949  arguments = [
1950  qm.fields.TextField(
1951  name = "dir",
1952  title = "Destination Directory",
1953  description = """The name of the directory.
1954 
1955  All results will be written to the directory indicated.""",
1956  verbatim = "true",
1957  default_value = ""),
1958  qm.fields.TextField(
1959  name = "prefix",
1960  title = "Output File Prefix",
1961  description = """The output file name will be the specified prefix
1962  followed by 'Test.xml' (CTest convention).""",
1963  verbatim = "true",
1964  default_value = ""),
1965  ]
1966 
1967  def __init__(self, arguments = None, **args):
1968  """Prepare the destination directory.
1969 
1970  Creates the destination directory and stores in it some preliminary
1971  annotations.
1972  """
1973  ResultStream.__init__(self, arguments, **args)
1974 
1975  self._xmlFile = os.path.join(self.dir, self.prefix + 'Test.xml')
1976 
1977  # add some bookkeeping attributes
1978  self._startTime = None
1979  self._endTime = None
1980  # Create the XML file skeleton if it does not exist yet
1981  if not os.path.isfile(self._xmlFile):
1982  # check that the container directory exists and create it if not
1983  if not os.path.exists(os.path.dirname(self._xmlFile)):
1984  os.makedirs(os.path.dirname(self._xmlFile))
1985 
1986  newdataset = ET.Element("newdataset")
1987  self._tree = ET.ElementTree(newdataset)
1988  self._tree.write(self._xmlFile)
1989  else :
1990  # Read the xml file
1991  self._tree = ET.parse(self._xmlFile)
1992  newdataset = self._tree.getroot()
1993 
1994  # Find the corresponding site; if it does not exist, create it
1995 
1996  #site = newdataset.find('Site[@BuildStamp="'+result["qmtest.start_time"]+'"][@OSPlatform="'+os.getenv("CMTOPT")+'"]')
1997  # The attribute-predicate syntax above is not supported by the ElementTree
1998  # version shipped with older Python releases, so use an explicit loop instead:
1999  for site in newdataset.getiterator() :
2000  if site.get("OSPlatform") == os.uname()[4]: # and site.get("BuildStamp") == result["qmtest.start_time"] and:
2001  # Here we could add more attributes to distinguish between two sites
2002  self._site = site
2003  break
2004  else :
2005  site = None
2006 
2007 
2008  if site is None :
2009  import socket
2010  import multiprocessing
2011  attrib = {
2012  "BuildName" : os.getenv("CMTCONFIG"),
2013  "Name" : os.uname()[1] ,
2014  "Generator" : "QMTest "+qm.version ,
2015  "OSName" : os.uname()[0] ,
2016  "Hostname" : socket.gethostname() ,
2017  "OSRelease" : os.uname()[2] ,
2018  "OSVersion" :os.uname()[3] ,
2019  "OSPlatform" :os.uname()[4] ,
2020  "Is64Bits" : "unknown" ,
2021  "VendorString" : "unknown" ,
2022  "VendorID" :"unknown" ,
2023  "FamilyID" :"unknown" ,
2024  "ModelID" :"unknown" ,
2025  "ProcessorCacheSize" :"unknown" ,
2026  "NumberOfLogicalCPU" : str(multiprocessing.cpu_count()) ,
2027  "NumberOfPhysicalCPU" : "0" ,
2028  "TotalVirtualMemory" : "0" ,
2029  "TotalPhysicalMemory" : "0" ,
2030  "LogicalProcessorsPerPhysical" : "0" ,
2031  "ProcessorClockFrequency" : "0" ,
2032  }
2033  self._site = ET.SubElement(newdataset, "Site", attrib)
2034  self._Testing = ET.SubElement(self._site,"Testing")
2035 
2036  # Start time elements
2037  self._StartDateTime = ET.SubElement(self._Testing, "StartDateTime")
2038 
2039  self._StartTestTime = ET.SubElement(self._Testing, "StartTestTime")
2040 
2041 
2042  self._TestList = ET.SubElement(self._Testing, "TestList")
2043 
2044  ## End time elements
2045  self._EndDateTime = ET.SubElement(self._Testing, "EndDateTime")
2046 
2047 
2048  self._EndTestTime = ET.SubElement(self._Testing, "EndTestTime")
2049 
2050 
2051 
2052  self._ElapsedMinutes = ET.SubElement(self._Testing, "ElapsedMinutes")
2053 
2054 
2055  else : # otherwise, reuse the existing elements
2056  self._Testing = self._site.find("Testing")
2057  self._StartDateTime = self._Testing.find("StartDateTime")
2058  self._StartTestTime = self._Testing.find("StartTestTime")
2059  self._TestList = self._Testing.find("TestList")
2060  self._EndDateTime = self._Testing.find("EndDateTime")
2061  self._EndTestTime = self._Testing.find("EndTestTime")
2062  self._ElapsedMinutes = self._Testing.find("ElapsedMinutes")
2063 
2064  """
2065  # Add some non-QMTest attributes
2066  if "CMTCONFIG" in os.environ:
2067  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
2068  import socket
2069  self.WriteAnnotation("hostname", socket.gethostname())
2070  """
2071 
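# The constructor above creates (or re-reads) a CTest XML skeleton of
# roughly this shape, which WriteResult then fills with one <Test> element
# per test (attribute values elided here):
#
#   <newdataset>
#     <Site BuildName="..." Name="..." OSPlatform="..." ...>
#       <Testing>
#         <StartDateTime/> <StartTestTime/>
#         <TestList/>
#         <EndDateTime/> <EndTestTime/> <ElapsedMinutes/>
#       </Testing>
#     </Site>
#   </newdataset>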
2072 
2073  def WriteAnnotation(self, key, value):
2074  if key == "qmtest.run.start_time":
2075  if self._site.get("qmtest.run.start_time") is not None :
2076  return None
2077  self._site.set(str(key),str(value))
2078  def WriteResult(self, result):
2079  """Prepare the test result directory in the destination directory storing
2080  into it the result fields.
2081  A summary of the test result is stored both in a file in the test directory
2082  and in the global summary file.
2083  """
2084  summary = {}
2085  summary["id"] = result.GetId()
2086  summary["outcome"] = result.GetOutcome()
2087  summary["cause"] = result.GetCause()
2088  summary["fields"] = result.keys()
2089  summary["fields"].sort()
2090 
2091 
2092  # Since we lack proper JSON support, coerce everything to plain strings
2093  for f in ["id", "outcome", "cause"]:
2094  summary[f] = str(summary[f])
2095  summary["fields"] = map(str, summary["fields"])
2096 
2097 
2098  # format
2099  # package_Test.xml
2100 
2101  if "qmtest.start_time" in summary["fields"]:
2102  haveStartDate = True
2103  else :
2104  haveStartDate = False
2105  if "qmtest.end_time" in summary["fields"]:
2106  haveEndDate = True
2107  else :
2108  haveEndDate = False
2109 
2110  # writing the start date time
2111  if haveStartDate:
2112  self._startTime = calendar.timegm(time.strptime(result["qmtest.start_time"], "%Y-%m-%dT%H:%M:%SZ"))
2113  if self._StartTestTime.text is None:
2114  self._StartDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._startTime))
2115  self._StartTestTime.text = str(self._startTime)
2116  self._site.set("BuildStamp" , result["qmtest.start_time"] )
2117 
2118  #Save the end date time in memory
2119  if haveEndDate:
2120  self._endTime = calendar.timegm(time.strptime(result["qmtest.end_time"], "%Y-%m-%dT%H:%M:%SZ"))
2121 
2122 
2123  #add the current test to the test list
2124  tl = ET.Element("Test")
2125  tl.text = summary["id"]
2126  self._TestList.insert(0,tl)
2127 
2128  #Fill the current test
2129  Test = ET.Element("Test")
2130  if summary["outcome"] == "PASS":
2131  Test.set("Status", "passed")
2132  elif summary["outcome"] == "FAIL":
2133  Test.set("Status", "failed")
2134  elif summary["outcome"] == "SKIPPED" or summary["outcome"] == "UNTESTED":
2135  Test.set("Status", "skipped")
2136  elif summary["outcome"] == "ERROR":
2137  Test.set("Status", "failed")
2138  Name = ET.SubElement(Test, "Name",)
2139  Name.text = summary["id"]
2140  Results = ET.SubElement(Test, "Results")
2141 
2142  # insert the test element right after the TestList
2143  self._Testing.insert(3,Test)
2144 
2145  if haveStartDate and haveEndDate:
2146  # Compute the test duration
2147  delta = self._endTime - self._startTime
2148  testduration = str(delta)
2149  Testduration= ET.SubElement(Results,"NamedMeasurement")
2150  Testduration.set("name","Execution Time")
2151  Testduration.set("type","numeric/float" )
2152  value = ET.SubElement(Testduration, "Value")
2153  value.text = testduration
2154 
2155  #remove the fields that we store in a different way
2156  for n in ("qmtest.end_time", "qmtest.start_time", "qmtest.cause", "ExecTest.stdout"):
2157  if n in summary["fields"]:
2158  summary["fields"].remove(n)
2159 
2160  # Here we can add some NamedMeasurement entries whose type we know
2161  #
2162  if "ExecTest.exit_code" in summary["fields"] :
2163  summary["fields"].remove("ExecTest.exit_code")
2164  ExitCode= ET.SubElement(Results,"NamedMeasurement")
2165  ExitCode.set("name","exit_code")
2166  ExitCode.set("type","numeric/integer" )
2167  value = ET.SubElement(ExitCode, "Value")
2168  value.text = convert_xml_illegal_chars(result["ExecTest.exit_code"])
2169 
2170  TestStartTime= ET.SubElement(Results,"NamedMeasurement")
2171  TestStartTime.set("name","Start_Time")
2172  TestStartTime.set("type","String" )
2173  value = ET.SubElement(TestStartTime, "Value")
2174  if haveStartDate :
2175  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._startTime)))
2176  else :
2177  value.text = ""
2178 
2179  TestEndTime= ET.SubElement(Results,"NamedMeasurement")
2180  TestEndTime.set("name","End_Time")
2181  TestEndTime.set("type","String" )
2182  value = ET.SubElement(TestEndTime, "Value")
2183  if haveEndDate :
2184  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._endTime)))
2185  else :
2186  value.text = ""
2187 
2188  if summary["cause"]:
2189  FailureCause= ET.SubElement(Results,"NamedMeasurement")
2190  FailureCause.set("name", "Cause")
2191  FailureCause.set("type", "String" )
2192  value = ET.SubElement(FailureCause, "Value")
2193  value.text = escape_xml_illegal_chars(summary["cause"])
2194 
2195  #Fill the result
2196  fields = {}
2197  for field in summary["fields"] :
2198  fields[field] = ET.SubElement(Results, "NamedMeasurement")
2199  fields[field].set("type","String")
2200  fields[field].set("name",field)
2201  value = ET.SubElement(fields[field], "Value")
2202  # strip the enclosing <pre></pre> tags
2203  if "<pre>" in result[field][0:6] :
2204  value.text = convert_xml_illegal_chars(result[field][5:-6])
2205  else :
2206  value.text = convert_xml_illegal_chars(result[field])
2207 
2208 
2209  if result.has_key("ExecTest.stdout" ) : #"ExecTest.stdout" in result :
2210  Measurement = ET.SubElement(Results, "Measurement")
2211  value = ET.SubElement(Measurement, "Value")
2212  if "<pre>" in result["ExecTest.stdout"][0:6] :
2213  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"][5:-6])
2214  else :
2215  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"])
2216 
2217 
2218  # write the file
2219  self._tree.write(self._xmlFile, "utf-8") # with Python 2.7 one could pass xml_declaration=True to add the XML header
2220 
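# Sketch of the timestamp handling used in WriteResult above: the
# "qmtest.start_time"/"qmtest.end_time" annotations are UTC strings that are
# converted to epoch seconds and then formatted in local time (the stamp
# value is just an example):
#
#   import time, calendar
#   stamp = "2014-01-31T12:00:00Z"
#   epoch = calendar.timegm(time.strptime(stamp, "%Y-%m-%dT%H:%M:%SZ"))
#   print time.strftime("%b %d %H:%M %Z %Y", time.localtime(epoch))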
2221 
2222  def Summarize(self):
2223 
2224  # Set the final end date time
2225  self._EndTestTime.text = str(self._endTime)
2226  self._EndDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._endTime))
2227 
2228  # Compute the total duration
2229  if self._endTime and self._startTime:
2230  delta = self._endTime - self._startTime
2231  else:
2232  delta = 0
2233  self._ElapsedMinutes.text = str(delta/60)
2234 
2235  # Write into the file
2236  self._tree.write(self._xmlFile, "utf-8") # with Python 2.7 one could pass xml_declaration=True to add the XML header
2237 