Gaudi Framework, version v25r0

Generated: Mon Feb 17 2014
GaudiTest.py
1 ########################################################################
2 # File: GaudiTest.py
3 # Author: Marco Clemencic CERN/PH-LBC
4 ########################################################################
5 __author__ = 'Marco Clemencic CERN/PH-LBC'
6 ########################################################################
7 # Imports
8 ########################################################################
9 import os
10 import sys
11 import re
12 import tempfile
13 import shutil
14 import string
15 import difflib
16 import time
17 import calendar
18 from subprocess import Popen, PIPE, STDOUT
19 
20 try:
21  from GaudiKernel import ROOT6WorkAroundEnabled
22 except ImportError:
23  def ROOT6WorkAroundEnabled(id=None):
24  # dummy implementation
25  return False
26 
27 # ensure the preferred locale
28 os.environ['LC_ALL'] = 'C'
29 
30 # Needed for the XML wrapper
31 try:
32  import xml.etree.cElementTree as ET
33 except ImportError:
34  import xml.etree.ElementTree as ET
35 
36 # redefinition of timedelta.total_seconds() because it is not present in Python 2.6
37 def total_seconds_replacement(timedelta) :
38  return timedelta.days*86400 + timedelta.seconds + timedelta.microseconds/1000000
39 
40 
41 import qm
42 from qm.test.classes.command import ExecTestBase
43 from qm.test.result_stream import ResultStream
44 
45 ### Needed by the re-implementation of TimeoutExecutable
46 import qm.executable
47 import signal
48 # The classes in this module are implemented differently depending on
49 # the operating system in use.
50 if sys.platform == "win32":
51  import msvcrt
52  import pywintypes
53  from threading import *
54  import win32api
55  import win32con
56  import win32event
57  import win32file
58  import win32pipe
59  import win32process
60 else:
61  import cPickle
62  import fcntl
63  import select
64  import qm.sigmask
65 
66 ########################################################################
67 # Utility Classes
68 ########################################################################
69 class TemporaryEnvironment:
70  """
71  Class to change the environment temporarily.
72  """
73  def __init__(self, orig = os.environ, keep_same = False):
74  """
75  Create a temporary environment on top of the one specified
76  (it can be another TemporaryEnvironment instance).
77  """
78  #print "New environment"
79  self.old_values = {}
80  self.env = orig
81  self._keep_same = keep_same
82 
83  def __setitem__(self,key,value):
84  """
85  Set an environment variable recording the previous value.
86  """
87  if key not in self.old_values :
88  if key in self.env :
89  if not self._keep_same or self.env[key] != value:
90  self.old_values[key] = self.env[key]
91  else:
92  self.old_values[key] = None
93  self.env[key] = value
94 
95  def __getitem__(self,key):
96  """
97  Get an environment variable.
98  Needed to provide the same interface as os.environ.
99  """
100  return self.env[key]
101 
102  def __delitem__(self,key):
103  """
104  Unset an environment variable.
105  Needed to provide the same interface as os.environ.
106  """
107  if key not in self.env :
108  raise KeyError(key)
109  self.old_values[key] = self.env[key]
110  del self.env[key]
111 
112  def keys(self):
113  """
114  Return the list of defined environment variables.
115  Needed to provide the same interface as os.environ.
116  """
117  return self.env.keys()
118 
119  def items(self):
120  """
121  Return the list of (name,value) pairs for the defined environment variables.
122  Needed to provide the same interface as os.environ.
123  """
124  return self.env.items()
125 
126  def __contains__(self,key):
127  """
128  Operator 'in'.
129  Needed to provide the same interface as os.environ.
130  """
131  return key in self.env
132 
133  def restore(self):
134  """
135  Revert all the changes done to the original environment.
136  """
137  for key,value in self.old_values.items():
138  if value is None:
139  del self.env[key]
140  else:
141  self.env[key] = value
142  self.old_values = {}
143 
144  def __del__(self):
145  """
146  Revert the changes on destruction.
147  """
148  #print "Restoring the environment"
149  self.restore()
150 
151  def gen_script(self,shell_type):
152  """
153  Generate a shell script to reproduce the changes in the environment.
154  """
155  shells = [ 'csh', 'sh', 'bat' ]
156  if shell_type not in shells:
157  raise RuntimeError("Shell type '%s' unknown. Available: %s"%(shell_type,shells))
158  out = ""
159  for key,value in self.old_values.items():
160  if key not in self.env:
161  # unset variable
162  if shell_type == 'csh':
163  out += 'unsetenv %s\n'%key
164  elif shell_type == 'sh':
165  out += 'unset %s\n'%key
166  elif shell_type == 'bat':
167  out += 'set %s=\n'%key
168  else:
169  # set variable
170  if shell_type == 'csh':
171  out += 'setenv %s "%s"\n'%(key,self.env[key])
172  elif shell_type == 'sh':
173  out += 'export %s="%s"\n'%(key,self.env[key])
174  elif shell_type == 'bat':
175  out += 'set %s=%s\n'%(key,self.env[key])
176  return out
177 
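# --- illustrative example (not part of GaudiTest.py) ---
# Minimal sketch of how TemporaryEnvironment is meant to be used, assuming
# MY_TEST_VAR is not already defined in the parent environment.
env = TemporaryEnvironment()
env['MY_TEST_VAR'] = '42'      # the previous value (here: none) is recorded
print env.gen_script('sh')     # -> 'export MY_TEST_VAR="42"\n'
env.restore()                  # os.environ is back to its original state
# --- end example ---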
178 class TempDir:
179  """Small class for temporary directories.
180  When instantiated, it creates a temporary directory and the instance
181  behaves as the string containing the directory name.
182  When the instance goes out of scope, it removes all the content of
183  the temporary directory (automatic clean-up).
184  """
185  def __init__(self, keep = False, chdir = False):
186  self.name = tempfile.mkdtemp()
187  self._keep = keep
188  self._origdir = None
189  if chdir:
190  self._origdir = os.getcwd()
191  os.chdir(self.name)
192 
193  def __str__(self):
194  return self.name
195 
196  def __del__(self):
197  if self._origdir:
198  os.chdir(self._origdir)
199  if self.name and not self._keep:
200  shutil.rmtree(self.name)
201 
202  def __getattr__(self,attr):
203  return getattr(self.name,attr)
204 
205 class TempFile:
206  """Small class for temporary files.
207  When instantiated, it creates a temporary directory and the instance
208  behaves as the string containing the directory name.
209  When the instance goes out of scope, it removes all the content of
210  the temporary directory (automatic clean-up).
211  """
212  def __init__(self, suffix='', prefix='tmp', dir=None, text=False, keep = False):
213  self.file = None
214  self.name = None
215  self._keep = keep
216 
217  self._fd, self.name = tempfile.mkstemp(suffix,prefix,dir,text)
218  self.file = os.fdopen(self._fd,"r+")
219 
220  def __str__(self):
221  return self.name
222 
223  def __del__(self):
224  if self.file:
225  self.file.close()
226  if self.name and not self._keep:
227  os.remove(self.name)
228 
229  def __getattr__(self,attr):
230  return getattr(self.file,attr)
231 
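# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of the intended use of TempDir and TempFile: both behave like the
# string they wrap and clean up automatically when deleted.
tmpdir = TempDir()
print os.path.isdir(str(tmpdir))     # True while 'tmpdir' is alive
tmpfile = TempFile(suffix='.opts')
tmpfile.write('// inline options\n') # attribute access is forwarded to the file object
tmpfile.flush()
del tmpfile                          # closes and removes the temporary file
del tmpdir                           # removes the temporary directory tree
# --- end example ---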
232 class CMT:
233  """Small wrapper to call CMT.
234  """
235  def __init__(self,path=None):
236  if path is None:
237  path = os.getcwd()
238  self.path = path
239 
240  def _run_cmt(self,command,args):
241  # prepare command line
242  if type(args) is str:
243  args = [args]
244  cmd = "cmt %s"%command
245  for arg in args:
246  cmd += ' "%s"'%arg
247 
248  # go to the execution directory
249  olddir = os.getcwd()
250  os.chdir(self.path)
251  # run cmt
252  result = os.popen4(cmd)[1].read()
253  # return to the old directory
254  os.chdir(olddir)
255  return result
256 
257  def __getattr__(self,attr):
258  return lambda args=[]: self._run_cmt(attr, args)
259 
260  def runtime_env(self,env = None):
261  """Returns a dictionary containing the runtime environment produced by CMT.
262  If a dictionary is passed, a modified instance of it is returned.
263  """
264  if env is None:
265  env = {}
266  for l in self.setup("-csh").splitlines():
267  l = l.strip()
268  if l.startswith("setenv"):
269  dummy,name,value = l.split(None,3)
270  env[name] = value.strip('"')
271  elif l.startswith("unsetenv"):
272  dummy,name = l.split(None,2)
273  if name in env:
274  del env[name]
275  return env
276  def show_macro(self,k):
277  r = self.show(["macro",k])
278  if r.find("CMT> Error: symbol not found") >= 0:
279  return None
280  else:
281  return self.show(["macro_value",k]).strip()
282 
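# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of the CMT wrapper: any attribute access becomes a 'cmt <attr> <args>'
# call executed in the package directory. The path and macro name below are
# hypothetical, and a working CMT installation is assumed.
cmt = CMT('/path/to/MyPackage/cmt')
print cmt.show('macros')                 # runs: cmt show "macros"
env = cmt.runtime_env()                  # environment implied by 'cmt setup -csh'
root = cmt.show_macro('MyPackage_root')  # None if the macro is not defined
# --- end example ---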
283 
284 ## Locates an executable in the executables path ($PATH) and returns the full
285 # path to it.
286 # If the executable cannot be found, None is returned
287 def which(executable):
288  """
289  Locates an executable in the executables path ($PATH) and returns the full
290  path to it. An application is looked for with or without the '.exe' suffix.
291  If the executable cannot be found, None is returned
292  """
293  if os.path.isabs(executable):
294  if not os.path.exists(executable):
295  if executable.endswith('.exe'):
296  if os.path.exists(executable[:-4]):
297  return executable[:-4]
298  return executable
299  for d in os.environ.get("PATH").split(os.pathsep):
300  fullpath = os.path.join(d, executable)
301  if os.path.exists(fullpath):
302  return fullpath
303  if executable.endswith('.exe'):
304  return which(executable[:-4])
305  return None
306 
307 def rationalizepath(p):
308  np = os.path.normpath(os.path.expandvars(p))
309  if os.path.exists(np):
310  p = os.path.realpath(np)
311  return p
312 
313 # XML Escaping character
314 import re
315 
316 # xml 1.0 valid characters:
317 # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
318 # so to invert that, not in Char ::
319 # x0 - x8 | xB | xC | xE - x1F
320 # (most control characters, though TAB, CR, LF allowed)
321 # | #xD800 - #xDFFF
322 # (unicode surrogate characters)
323 # | #xFFFE | #xFFFF |
324 # (unicode end-of-plane non-characters)
325 # >= 110000
326 # that would be beyond unicode!!!
327 _illegal_xml_chars_RE = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
328 
329 def hexreplace( match ):
330  "Return the hex string "
331  return "".join(map(hexConvert,match.group()))
332 
333 def hexConvert(char):
334  return hex(ord(char))
335 def convert_xml_illegal_chars(val):
336  return _illegal_xml_chars_RE.sub(hexreplace, val)
337 
338 def escape_xml_illegal_chars(val, replacement='?'):
339  """Filter out characters that are illegal in XML.
340  Looks for any character in val that is not allowed in XML
341  and replaces it with replacement ('?' by default).
342 
343  """
344  return _illegal_xml_chars_RE.sub(replacement, val)
345 
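# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of the two sanitization helpers above: the first replaces characters
# that are illegal in XML 1.0 with their hex codes, the second with a fixed
# placeholder character.
raw = u"ok\x00\x1b[1m"
print repr(convert_xml_illegal_chars(raw))      # -> u'ok0x00x1b[1m'
print repr(escape_xml_illegal_chars(raw, '?'))  # -> u'ok??[1m'
# --- end example ---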
346 ########################################################################
347 # Output Validation Classes
348 ########################################################################
349 class BasicOutputValidator:
350  """Basic implementation of an option validator for Gaudi tests.
351  This implementation is based on the standard (LCG) validation functions
352  used in QMTest.
353  """
354  def __init__(self,ref,cause,result_key):
355  self.reference = ref
356  self.cause = cause
357  self.result_key = result_key
358 
359  def __call__(self, out, result):
360  """Validate the output of the program.
361 
362  'stdout' -- A string containing the data written to the standard output
363  stream.
364 
365  'stderr' -- A string containing the data written to the standard error
366  stream.
367 
368  'result' -- A 'Result' object. It may be used to annotate
369  the outcome according to the content of stderr.
370 
371  returns -- A list of strings giving causes of failure."""
372 
373  causes = []
374  # Check to see if the output matches.
375  if not self.__CompareText(out, self.reference):
376  causes.append(self.cause)
377  result[self.result_key] = result.Quote(self.reference)
378 
379  return causes
380 
381  def __CompareText(self, s1, s2):
382  """Compare 's1' and 's2', ignoring line endings.
383 
384  's1' -- A string.
385 
386  's2' -- A string.
387 
388  returns -- True if 's1' and 's2' are the same, ignoring
389  differences in line endings."""
390 
391  # The "splitlines" method works independently of the line ending
392  # convention in use.
393  return s1.splitlines() == s2.splitlines()
394 
395 class FilePreprocessor:
396  """ Base class for a callable that takes a file and returns a modified
397  version of it."""
398  def __processLine__(self, line):
399  return line
400  def __call__(self, input):
401  if hasattr(input,"__iter__"):
402  lines = input
403  mergeback = False
404  else:
405  lines = input.splitlines()
406  mergeback = True
407  output = []
408  for l in lines:
409  l = self.__processLine__(l)
410  if l: output.append(l)
411  if mergeback: output = '\n'.join(output)
412  return output
413  def __add__(self, rhs):
414  return FilePreprocessorSequence([self,rhs])
415 
416 class FilePreprocessorSequence(FilePreprocessor):
417  def __init__(self, members = []):
418  self.members = members
419  def __add__(self, rhs):
420  return FilePreprocessorSequence(self.members + [rhs])
421  def __call__(self, input):
422  output = input
423  for pp in self.members:
424  output = pp(output)
425  return output
426 
427 class LineSkipper(FilePreprocessor):
428  def __init__(self, strings = [], regexps = []):
429  import re
430  self.strings = strings
431  self.regexps = map(re.compile,regexps)
432 
433  def __processLine__(self, line):
434  for s in self.strings:
435  if line.find(s) >= 0: return None
436  for r in self.regexps:
437  if r.search(line): return None
438  return line
439 
440 class BlockSkipper(FilePreprocessor):
441  def __init__(self, start, end):
442  self.start = start
443  self.end = end
444  self._skipping = False
445 
446  def __processLine__(self, line):
447  if self.start in line:
448  self._skipping = True
449  return None
450  elif self.end in line:
451  self._skipping = False
452  elif self._skipping:
453  return None
454  return line
455 
456 class RegexpReplacer(FilePreprocessor):
457  def __init__(self, orig, repl = "", when = None):
458  if when:
459  when = re.compile(when)
460  self._operations = [ (when, re.compile(orig), repl) ]
461  def __add__(self,rhs):
462  if isinstance(rhs, RegexpReplacer):
463  res = RegexpReplacer("","",None)
464  res._operations = self._operations + rhs._operations
465  else:
466  res = FilePreprocessor.__add__(self, rhs)
467  return res
468  def __processLine__(self, line):
469  for w,o,r in self._operations:
470  if w is None or w.search(line):
471  line = o.sub(r, line)
472  return line
473 
474 # Common preprocessors
475 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
476 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9] *(CES?T)?",
477  "00:00:00 1970-01-01")
478 normalizeEOL = FilePreprocessor()
479 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
480 
481 skipEmptyLines = FilePreprocessor()
482 # FIXME: that's ugly
483 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
484 
485 ## Special preprocessor sorting the list of strings (whitespace separated)
486 # that follow a signature on a single line
487 class LineSorter(FilePreprocessor):
488  def __init__(self, signature):
489  self.signature = signature
490  self.siglen = len(signature)
491  def __processLine__(self, line):
492  pos = line.find(self.signature)
493  if pos >=0:
494  lst = line[(pos+self.siglen):].split()
495  lst.sort()
496  line = line[:(pos+self.siglen)]
497  line += " ".join(lst)
498  return line
499 
500 # Preprocessors for GaudiExamples
501 normalizeExamples = maskPointers + normalizeDate
502 for w,o,r in [
503  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
504  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
505  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
506  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
507  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
508  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
509  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
510  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
511  # Absorb a change in ServiceLocatorHelper
512  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
513  # Remove the leading 0 in Windows' exponential format
514  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
515  # Output line changed in Gaudi v24
516  (None, r'Service reference count check:', r'Looping over all active services...'),
517  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
518  normalizeExamples += RegexpReplacer(o,r,w)
519 
520 lineSkipper = LineSkipper(["//GP:",
521  "JobOptionsSvc INFO # ",
522  "JobOptionsSvc WARNING # ",
523  "Time User",
524  "Welcome to",
525  "This machine has a speed",
526  "TIME:",
527  "running on",
528  "ToolSvc.Sequenc... INFO",
529  "DataListenerSvc INFO XML written to file:",
530  "[INFO]","[WARNING]",
531  "DEBUG No writable file catalog found which contains FID:",
532  "0 local", # hack for ErrorLogExample
533  "DEBUG Service base class initialized successfully", # changed between v20 and v21
534  "DEBUG Incident timing:", # introduced with patch #3487
535  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
536  # The signal handler complains about SIGXCPU not defined on some platforms
537  'SIGXCPU',
538  ],regexps = [
539  r"^JobOptionsSvc INFO *$",
540  r"^#", # Ignore python comments
541  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
542  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
543  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
544  r"File '.*.xml' does not exist",
545  r"INFO Refer to dataset .* by its file ID:",
546  r"INFO Referring to dataset .* by its file ID:",
547  r"INFO Disconnect from dataset",
548  r"INFO Disconnected from dataset",
549  r"INFO Disconnected data IO:",
550  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
551  # I want to ignore the header of the unchecked StatusCode report
552  r"^StatusCodeSvc.*listing all unchecked return codes:",
553  r"^StatusCodeSvc\s*INFO\s*$",
554  r"Num\s*\|\s*Function\s*\|\s*Source Library",
555  r"^[-+]*\s*$",
556  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
557  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
558  # Hide unchecked StatusCodes from dictionaries
559  r"^ +[0-9]+ \|.*ROOT",
560  r"^ +[0-9]+ \|.*\|.*Dict",
561  # Remove ROOT TTree summary table, which changes from one version to the other
562  r"^\*.*\*$",
563  # Remove Histos Summaries
564  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
565  r"^ \|",
566  r"^ ID=",
567  ] )
568 
569 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
570  normalizeEOL + LineSorter("Services to release : "))
571 
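# --- illustrative example (not part of GaudiTest.py) ---
# Minimal sketch of how the composed 'normalizeExamples' preprocessor is used:
# it accepts either a list of lines or a single string and returns the
# filtered, normalized text that is compared against the reference file.
sample = ["ApplicationMgr   INFO Opened file at 0x7fa5c0012345 on 12:34:56 2013-07-01",
          "Welcome to ApplicationMgr"]
print normalizeExamples(sample)
# the pointer is masked to '0x########', the timestamp becomes
# '00:00:00 1970-01-01' and the "Welcome to" line is dropped altogether.
# --- end example ---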
572 class ReferenceFileValidator:
573  def __init__(self, reffile, cause, result_key, preproc = normalizeExamples):
574  self.reffile = os.path.expandvars(reffile)
575  self.cause = cause
576  self.result_key = result_key
577  self.preproc = preproc
578  def __call__(self, stdout, result):
579  causes = []
580  if os.path.isfile(self.reffile):
581  orig = open(self.reffile).xreadlines()
582  if self.preproc:
583  orig = self.preproc(orig)
584  else:
585  orig = []
586 
587  new = stdout.splitlines()
588  if self.preproc:
589  new = self.preproc(new)
590  #open(self.reffile + ".test","w").writelines(new)
591  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
592  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
593  #filterdiffs = [x.strip() for x in diffs]
594  if filterdiffs:
595  result[self.result_key] = result.Quote("\n".join(filterdiffs))
596  result[self.result_key] += result.Quote("""
597 Legend:
598  -) reference file
599  +) standard output of the test""")
600  causes.append(self.cause)
601 
602  return causes
603 
604 ########################################################################
605 # Useful validation functions
606 ########################################################################
607 def findReferenceBlock(reference, stdout, result, causes, signature_offset=0, signature=None,
608  id = None):
609  """
610  Given a block of text, tries to find it in the output.
611  The block has to be identified by a signature line. By default, the first
612  line is used as signature, or the line pointed to by signature_offset. If
613  signature_offset points outside the block, a signature line can be passed as
614  the signature argument. Note: if 'signature' is None (the default), a negative
615  signature_offset is interpreted as an index in a list (e.g. -1 means the last
616  line), otherwise it is interpreted as the number of lines before the
617  first line of the block at which the signature must appear.
618  The parameter 'id' allows distinguishing between different calls to this
619  function in the same validation code.
620  """
621  # split reference file, sanitize EOLs and remove empty lines
622  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
623  if not reflines:
624  raise RuntimeError("Empty (or null) reference")
625  # the same on standard output
626  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
627 
628  res_field = "GaudiTest.RefBlock"
629  if id:
630  res_field += "_%s" % id
631 
632  if signature is None:
633  if signature_offset < 0:
634  signature_offset = len(reflines)+signature_offset
635  signature = reflines[signature_offset]
636  # find the reference block in the output file
637  try:
638  pos = outlines.index(signature)
639  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
640  if reflines != outlines:
641  msg = "standard output"
642  # I do not want 2 messages in causes if the function is called twice
643  if not msg in causes:
644  causes.append(msg)
645  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
646  except ValueError:
647  causes.append("missing signature")
648  result[res_field + ".signature"] = result.Quote(signature)
649  if len(reflines) > 1 or signature != reflines[0]:
650  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
651 
652  return causes
653 
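# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of calling findReferenceBlock outside QMTest, with a minimal stand-in
# for the qm Result object (only Quote() and item assignment are needed).
class _FakeResult(dict):
    def Quote(self, text):
        return text
ref = ("ToolSvc.MyTool     INFO  counter 1 = 42\n"
       "ToolSvc.MyTool     INFO  counter 2 = 0")
out = "ApplicationMgr     INFO  start\n" + ref + "\nApplicationMgr     INFO  stop"
print findReferenceBlock(ref, out, _FakeResult(), [])   # -> [] (block found unchanged)
# --- end example ---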
654 def countErrorLines(expected = {'ERROR':0, 'FATAL':0}, **kwargs):
655  """
656  Count the number of messages with required severity (by default ERROR and FATAL)
657  and check if their numbers match the expected ones (0 by default).
658  The dictionary "expected" can be used to tune the number of errors and fatals
659  allowed, or to limit the number of expected warnings etc.
660  """
661  stdout = kwargs["stdout"]
662  result = kwargs["result"]
663  causes = kwargs["causes"]
664 
665  # prepare the dictionary to record the extracted lines
666  errors = {}
667  for sev in expected:
668  errors[sev] = []
669 
670  outlines = stdout.splitlines()
671  from math import log10
672  fmt = "%%%dd - %%s" % (int(log10(len(outlines))+1))
673 
674  linecount = 0
675  for l in outlines:
676  linecount += 1
677  words = l.split()
678  if len(words) >= 2 and words[1] in errors:
679  errors[words[1]].append(fmt%(linecount,l.rstrip()))
680 
681  for e in errors:
682  if len(errors[e]) != expected[e]:
683  causes.append('%s(%d)'%(e,len(errors[e])))
684  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
685  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
686 
687  return causes
688 
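# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of calling countErrorLines directly; inside a test validator the
# 'stdout', 'result' and 'causes' arguments are bound automatically by the
# CallWrapper used in GaudiExeTest.ValidateOutput further below.
class _FakeResult(dict):          # same minimal stand-in as in the previous example
    def Quote(self, text):
        return text
log = ("EventLoopMgr         INFO processing event 1\n"
       "MyAlgorithm         ERROR something went wrong\n")
print countErrorLines(expected={'ERROR': 0, 'FATAL': 0},
                      stdout=log, result=_FakeResult(), causes=[])
# -> ['ERROR(1)']: one ERROR line found where none was expected
# --- end example ---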
689 
690 def _parseTTreeSummary(lines, pos):
691  """
692  Parse the TTree summary table in lines, starting from pos.
693  Returns a tuple with the dictionary of digested information and the
694  position of the first line after the summary.
695  """
696  result = {}
697  i = pos + 1 # first line is a sequence of '*'
698  count = len(lines)
699 
700  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
701  def parseblock(ll):
702  r = {}
703  cols = splitcols(ll[0])
704  r["Name"], r["Title"] = cols[1:]
705 
706  cols = splitcols(ll[1])
707  r["Entries"] = int(cols[1])
708 
709  sizes = cols[2].split()
710  r["Total size"] = int(sizes[2])
711  if sizes[-1] == "memory":
712  r["File size"] = 0
713  else:
714  r["File size"] = int(sizes[-1])
715 
716  cols = splitcols(ll[2])
717  sizes = cols[2].split()
718  if cols[0] == "Baskets":
719  r["Baskets"] = int(cols[1])
720  r["Basket size"] = int(sizes[2])
721  r["Compression"] = float(sizes[-1])
722  return r
723 
724  if i < (count - 3) and lines[i].startswith("*Tree"):
725  result = parseblock(lines[i:i+3])
726  result["Branches"] = {}
727  i += 4
728  while i < (count - 3) and lines[i].startswith("*Br"):
729  if i < (count - 2) and lines[i].startswith("*Branch "):
730  # skip branch header
731  i += 3
732  continue
733  branch = parseblock(lines[i:i+3])
734  result["Branches"][branch["Name"]] = branch
735  i += 4
736 
737  return (result, i)
738 
739 def findTTreeSummaries(stdout):
740  """
741  Scan stdout to find ROOT TTree summaries and digest them.
742  """
743  stars = re.compile(r"^\*+$")
744  outlines = stdout.splitlines()
745  nlines = len(outlines)
746  trees = {}
747 
748  i = 0
749  while i < nlines: #loop over the output
750  # look for the beginning of a TTree summary table (a line of '*')
751  while i < nlines and not stars.match(outlines[i]):
752  i += 1
753  if i < nlines:
754  tree, i = _parseTTreeSummary(outlines, i)
755  if tree:
756  trees[tree["Name"]] = tree
757 
758  return trees
759 
760 def cmpTreesDicts(reference, to_check, ignore = None):
761  """
762  Check that all the keys in reference are in to_check too, with the same value.
763  If the value is a dict, the function is called recursively. to_check can
764  contain more keys than reference; the extra keys will not be tested.
765  The function returns at the first difference found.
766  """
767  fail_keys = []
768  # filter the keys in the reference dictionary
769  if ignore:
770  ignore_re = re.compile(ignore)
771  keys = [ key for key in reference if not ignore_re.match(key) ]
772  else:
773  keys = reference.keys()
774  # loop over the keys (not ignored) in the reference dictionary
775  for k in keys:
776  if k in to_check: # the key must be in the dictionary to_check
777  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
778  # if both reference and to_check values are dictionaries, recurse
779  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
780  else:
781  # compare the two values
782  failed = to_check[k] != reference[k]
783  else: # handle missing keys in the dictionary to check (i.e. failure)
784  to_check[k] = None
785  failed = True
786  if failed:
787  fail_keys.insert(0, k)
788  break # exit from the loop at the first failure
789  return fail_keys # return the list of keys bringing to the different values
790 
791 def getCmpFailingValues(reference, to_check, fail_path):
792  c = to_check
793  r = reference
794  for k in fail_path:
795  c = c.get(k,None)
796  r = r.get(k,None)
797  if c is None or r is None:
798  break # one of the dictionaries is not deep enough
799  return (fail_path, r, c)
800 
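# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of how the digested TTree summaries are compared: only the keys
# present in the reference are checked and the first difference is reported.
ref_trees = {'Hits': {'Entries': 100}}
new_trees = {'Hits': {'Entries': 99, 'Total size': 12345}}   # extra keys are ignored
fail = cmpTreesDicts(ref_trees, new_trees, ignore=r"Basket|.*size|Compression")
print fail                                             # -> ['Hits', 'Entries']
print getCmpFailingValues(ref_trees, new_trees, fail)  # -> (['Hits', 'Entries'], 100, 99)
# --- end example ---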
801 # signature of the print-out of the histograms
802 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
803 
804 def parseHistosSummary(lines, pos):
805  """
806  Extract the histogram information from the lines starting at pos.
807  Returns the digested summaries and the position of the first line after the summary block.
808  """
809  global h_count_re
810  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
811  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
812 
813  nlines = len(lines)
814 
815  # decode header
816  m = h_count_re.search(lines[pos])
817  name = m.group(1).strip()
818  total = int(m.group(2))
819  header = {}
820  for k, v in [ x.split("=") for x in m.group(3).split() ]:
821  header[k] = int(v)
822  pos += 1
823  header["Total"] = total
824 
825  summ = {}
826  while pos < nlines:
827  m = h_table_head.search(lines[pos])
828  if m:
829  t, d = m.groups(1) # type and directory
830  t = t.replace(" profile", "Prof")
831  pos += 1
832  if pos < nlines:
833  l = lines[pos]
834  else:
835  l = ""
836  cont = {}
837  if l.startswith(" | ID"):
838  # table format
839  titles = [ x.strip() for x in l.split("|")][1:]
840  pos += 1
841  while pos < nlines and lines[pos].startswith(" |"):
842  l = lines[pos]
843  values = [ x.strip() for x in l.split("|")][1:]
844  hcont = {}
845  for i in range(len(titles)):
846  hcont[titles[i]] = values[i]
847  cont[hcont["ID"]] = hcont
848  pos += 1
849  elif l.startswith(" ID="):
850  while pos < nlines and lines[pos].startswith(" ID="):
851  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
852  cont[values[0]] = values
853  pos += 1
854  else: # not interpreted
855  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
856  if not d in summ:
857  summ[d] = {}
858  summ[d][t] = cont
859  summ[d]["header"] = header
860  else:
861  break
862  if not summ:
863  # If the full table is not present, we use only the header
864  summ[name] = {"header": header}
865  return summ, pos
866 
867 def findHistosSummaries(stdout):
868  """
869  Scan stdout to find the histogram summaries and digest them.
870  """
871  outlines = stdout.splitlines()
872  nlines = len(outlines) - 1
873  summaries = {}
874  global h_count_re
875 
876  pos = 0
877  while pos < nlines:
878  summ = {}
879  # find first line of block:
880  match = h_count_re.search(outlines[pos])
881  while pos < nlines and not match:
882  pos += 1
883  match = h_count_re.search(outlines[pos])
884  if match:
885  summ, pos = parseHistosSummary(outlines, pos)
886  summaries.update(summ)
887  return summaries
888 
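# --- illustrative example (not part of GaudiTest.py) ---
# Sketch of the kind of line matched by h_count_re and digested by
# parseHistosSummary/findHistosSummaries: the per-dimension counts follow the
# total as 'key=value' pairs.
line = "MyAlg                SUCCESS Booked 2 Histogram(s) : 1D=1 2D=1"
m = h_count_re.search(line)
print m.group(1).strip(), m.group(2), m.group(3)   # -> MyAlg 2 1D=1 2D=1
print findHistosSummaries(line + "\n\n")
# with no table following, only the header is kept:
# {'MyAlg': {'header': {'1D': 1, '2D': 1, 'Total': 2}}}
# --- end example ---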
889 class GaudiFilterExecutable(qm.executable.Filter):
890  def __init__(self, input, timeout = -1):
891  """Create a new 'Filter'.
892 
893  'input' -- The string containing the input to provide to the
894  child process.
895 
896  'timeout' -- As for 'TimeoutExecutable.__init__'."""
897 
898  super(GaudiFilterExecutable, self).__init__(input, timeout)
899  self.__input = input
900  self.__timeout = timeout
901  self.stack_trace_file = None
902  # Temporary file to pass the stack trace from one process to the other
903  # The file must be closed and reopened when needed to avoid conflicts
904  # between the processes
905  tmpf = tempfile.mkstemp()
906  os.close(tmpf[0])
907  self.stack_trace_file = tmpf[1] # remember only the name
908 
909  def __UseSeparateProcessGroupForChild(self):
910  """Copied from TimeoutExecutable to allow the re-implementation of
911  _HandleChild.
912  """
913  if sys.platform == "win32":
914  # In Windows 2000 (or later), we should use "jobs" by
915  # analogy with UNIX process groups. However, that
916  # functionality is not (yet) provided by the Python Win32
917  # extensions.
918  return 0
919 
920  return self.__timeout >= 0 or self.__timeout == -2
921  ##
922  # Needs to replace the ones from RedirectedExecutable and TimeoutExecutable
923  def _HandleChild(self):
924  """Code copied from both FilterExecutable and TimeoutExecutable.
925  """
926  # Close the pipe ends that we do not need.
927  if self._stdin_pipe:
928  self._ClosePipeEnd(self._stdin_pipe[0])
929  if self._stdout_pipe:
930  self._ClosePipeEnd(self._stdout_pipe[1])
931  if self._stderr_pipe:
932  self._ClosePipeEnd(self._stderr_pipe[1])
933 
934  # The pipes created by 'RedirectedExecutable' must be closed
935  # before the monitor process (created by 'TimeoutExecutable')
936  # is created. Otherwise, if the child process dies, 'select'
937  # in the parent may not return, because the monitor process may
938  # still have one of the file descriptors open.
939 
940  super(qm.executable.TimeoutExecutable, self)._HandleChild()
941 
942  if self.__UseSeparateProcessGroupForChild():
943  # Put the child into its own process group. This step is
944  # performed in both the parent and the child; therefore both
945  # processes can safely assume that the creation of the process
946  # group has taken place.
947  child_pid = self._GetChildPID()
948  try:
949  os.setpgid(child_pid, child_pid)
950  except:
951  # The call to setpgid may fail if the child has exited,
952  # or has already called 'exec'. In that case, we are
953  # guaranteed that the child has already put itself in the
954  # desired process group.
955  pass
956  # Create the monitoring process.
957  #
958  # If the monitoring process is in parent's process group and
959  # kills the child after waitpid has returned in the parent, we
960  # may end up trying to kill a process group other than the one
961  # that we intend to kill. Therefore, we put the monitoring
962  # process in the same process group as the child; that ensures
963  # that the process group will persist until the monitoring
964  # process kills it.
965  self.__monitor_pid = os.fork()
966  if self.__monitor_pid != 0:
967  # Make sure that the monitoring process is placed into the
968  # child's process group before the parent process calls
969  # 'waitpid'. In this way, we are guaranteed that the monitoring
970  # process ends up in the same process group as the child.
971  os.setpgid(self.__monitor_pid, child_pid)
972  else:
973  # Put the monitoring process into the child's process
974  # group. We know the process group still exists at
975  # this point because either (a) we are in the process
976  # group, or (b) the parent has not yet called waitpid.
977  os.setpgid(0, child_pid)
978 
979  # Close all open file descriptors. They are not needed
980  # in the monitor process. Furthermore, when the parent
981  # closes the write end of the stdin pipe to the child,
982  # we do not want the pipe to remain open; leaving the
983  # pipe open in the monitor process might cause the child
984  # to block waiting for additional input.
985  try:
986  max_fds = os.sysconf("SC_OPEN_MAX")
987  except:
988  max_fds = 256
989  for fd in xrange(max_fds):
990  try:
991  os.close(fd)
992  except:
993  pass
994  try:
995  if self.__timeout >= 0:
996  # Give the child time to run.
997  time.sleep (self.__timeout)
998  #######################################################
999  ### This is the interesting part: dump the stack trace to a file
1000  if sys.platform == "linux2": # we should have /proc and gdb
1001  cmd = ["gdb",
1002  os.path.join("/proc", str(child_pid), "exe"),
1003  str(child_pid),
1004  "-batch", "-n", "-x",
1005  "'%s'" % os.path.join(os.path.dirname(__file__), "stack-trace.gdb")]
1006  # FIXME: I wanted to use subprocess.Popen, but it doesn't want to work
1007  # in this context.
1008  o = os.popen(" ".join(cmd)).read()
1009  open(self.stack_trace_file,"w").write(o)
1010  #######################################################
1011 
1012  # Kill all processes in the child process group.
1013  os.kill(0, signal.SIGKILL)
1014  else:
1015  # This call to select will never terminate.
1016  select.select ([], [], [])
1017  finally:
1018  # Exit. This code is in a finally clause so that
1019  # we are guaranteed to get here no matter what.
1020  os._exit(0)
1021  elif self.__timeout >= 0 and sys.platform == "win32":
1022  # Create a monitoring thread.
1023  self.__monitor_thread = Thread(target = self.__Monitor)
1024  self.__monitor_thread.start()
1025 
1026  if sys.platform == "win32":
1027 
1028  def __Monitor(self):
1029  """Code copied from FilterExecutable.
1030  Kill the child if the timeout expires.
1031 
1032  This function is run in the monitoring thread."""
1033 
1034  # The timeout may be expressed as a floating-point value
1035  # on UNIX, but it must be an integer number of
1036  # milliseconds when passed to WaitForSingleObject.
1037  timeout = int(self.__timeout * 1000)
1038  # Wait for the child process to terminate or for the
1039  # timer to expire.
1040  result = win32event.WaitForSingleObject(self._GetChildPID(),
1041  timeout)
1042  # If the timeout occurred, kill the child process.
1043  if result == win32con.WAIT_TIMEOUT:
1044  self.Kill()
1045 
1046 ########################################################################
1047 # Test Classes
1048 ########################################################################
1049 class GaudiExeTest(ExecTestBase):
1050  """Standard Gaudi test.
1051  """
1052  arguments = [
1053  qm.fields.TextField(
1054  name="program",
1055  title="Program",
1056  not_empty_text=1,
1057  description="""The path to the program.
1058 
1059  This field indicates the path to the program. If it is not
1060  an absolute path, the value of the 'PATH' environment
1061  variable will be used to search for the program.
1062  If not specified, $GAUDIEXE or Gaudi.exe are used.
1063  """
1064  ),
1065  qm.fields.SetField(qm.fields.TextField(
1066  name="args",
1067  title="Argument List",
1068  description="""The command-line arguments.
1069 
1070  If this field is left blank, the program is run without any
1071  arguments.
1072 
1073  Use this field to specify the option files.
1074 
1075  An implicit 0th argument (the path to the program) is added
1076  automatically."""
1077  )),
1078  qm.fields.TextField(
1079  name="options",
1080  title="Options",
1081  description="""Options to be passed to the application.
1082 
1083  This field allows passing a list of options to the main program
1084  without the need for a separate option file.
1085 
1086  The content of the field is written to a temporary file whose name
1087  is passed to the application as the last argument (appended to the
1088  field "Argument List").
1089  """,
1090  verbatim="true",
1091  multiline="true",
1092  default_value=""
1093  ),
1094  qm.fields.TextField(
1095  name="workdir",
1096  title="Working Directory",
1097  description="""Path to the working directory.
1098 
1099  If this field is left blank, the program will be run from the qmtest
1100  directory, otherwise from the directory specified.""",
1101  default_value=""
1102  ),
1103  qm.fields.TextField(
1104  name="reference",
1105  title="Reference Output",
1106  description="""Path to the file containing the reference output.
1107 
1108  If this field is left blank, any standard output will be considered
1109  valid.
1110 
1111  If the reference file is specified, any output on standard error is
1112  ignored."""
1113  ),
1114  qm.fields.TextField(
1115  name="error_reference",
1116  title="Reference for standard error",
1117  description="""Path to the file containing the reference for the standard error.
1118 
1119  If this field is left blank, any standard error will be considered
1120  valid.
1121 
1122  If the reference file is specified, the output on standard error is
1123  compared against it."""
1124  ),
1125  qm.fields.SetField(qm.fields.TextField(
1126  name = "unsupported_platforms",
1127  title = "Unsupported Platforms",
1128  description = """Platforms on which the test must not be run.
1129 
1130  List of regular expressions identifying the platforms on which the
1131  test is not run and the result is set to UNTESTED."""
1132  )),
1133 
1134  qm.fields.TextField(
1135  name = "validator",
1136  title = "Validator",
1137  description = """Function to validate the output of the test.
1138 
1139  If defined, the function is used to validate the products of the
1140  test.
1141  The function is called passing as arguments:
1142  self: the test class instance
1143  stdout: the standard output of the executed test
1144  stderr: the standard error of the executed test
1145  result: the Result objects to fill with messages
1146  The function must return a list of causes for the failure.
1147  If specified, overrides standard output, standard error and
1148  reference files.
1149  """,
1150  verbatim="true",
1151  multiline="true",
1152  default_value=""
1153  ),
1154 
1155  qm.fields.BooleanField(
1156  name = "use_temp_dir",
1157  title = "Use temporary directory",
1158  description = """Use temporary directory.
1159 
1160  If set to true, use a temporary directory as working directory.
1161  """,
1162  default_value="false"
1163  ),
1164 
1165  qm.fields.IntegerField(
1166  name = "signal",
1167  title = "Expected signal",
1168  description = """Expect termination by signal.""",
1169  default_value=None
1170  ),
1171  ]
1172 
1173  def PlatformIsNotSupported(self, context, result):
1174  platform = self.GetPlatform()
1175  unsupported = [ re.compile(x)
1176  for x in [ str(y).strip()
1177  for y in self.unsupported_platforms ]
1178  if x
1179  ]
1180  for p_re in unsupported:
1181  if p_re.search(platform):
1182  result.SetOutcome(result.UNTESTED)
1183  result[result.CAUSE] = 'Platform not supported.'
1184  return True
1185  return False
1186 
1187  def GetPlatform(self):
1188  """
1189  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1190  """
1191  arch = "None"
1192  # check architecture name
1193  if "CMTCONFIG" in os.environ:
1194  arch = os.environ["CMTCONFIG"]
1195  elif "SCRAM_ARCH" in os.environ:
1196  arch = os.environ["SCRAM_ARCH"]
1197  return arch
1198 
1199  def isWinPlatform(self):
1200  """
1201  Return True if the current platform is Windows.
1202 
1203  This function was needed because of the change in the CMTCONFIG format,
1204  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1205  """
1206  platform = self.GetPlatform()
1207  return "winxp" in platform or platform.startswith("win")
1208 
1209  def _expandReferenceFileName(self, reffile):
1210  # if no file is passed, do nothing
1211  if not reffile:
1212  return ""
1213 
1214  # function to split a platform id into its constituent parts
1215  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
1216 
1217  reference = os.path.normpath(os.path.expandvars(reffile))
1218  # old-style platform-specific reference name
1219  spec_ref = reference[:-3] + self.GetPlatform()[0:3] + reference[-3:]
1220  if os.path.isfile(spec_ref):
1221  reference = spec_ref
1222  else: # look for new-style platform specific reference files:
1223  # get all the files whose name start with the reference filename
1224  dirname, basename = os.path.split(reference)
1225  if not dirname: dirname = '.'
1226  head = basename + "."
1227  head_len = len(head)
1228  platform = platformSplit(self.GetPlatform())
1229  candidates = []
1230  for f in os.listdir(dirname):
1231  if f.startswith(head):
1232  req_plat = platformSplit(f[head_len:])
1233  if platform.issuperset(req_plat):
1234  candidates.append( (len(req_plat), f) )
1235  if candidates: # take the one with highest matching
1236  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
1237  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
1238  candidates.sort()
1239  reference = os.path.join(dirname, candidates[-1][1])
1240  return reference
1241 
1242  def CheckTTreesSummaries(self, stdout, result, causes,
1243  trees_dict = None,
1244  ignore = r"Basket|.*size|Compression"):
1245  """
1246  Compare the TTree summaries in stdout with the ones in trees_dict or in
1247  the reference file. By default ignore the size, compression and basket
1248  fields.
1249  The presence of TTree summaries when none is expected is not a failure.
1250  """
1251  if trees_dict is None:
1252  reference = self._expandReferenceFileName(self.reference)
1253  # call the validator if the file exists
1254  if reference and os.path.isfile(reference):
1255  trees_dict = findTTreeSummaries(open(reference).read())
1256  else:
1257  trees_dict = {}
1258 
1259  from pprint import PrettyPrinter
1260  pp = PrettyPrinter()
1261  if trees_dict:
1262  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
1263  if ignore:
1264  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
1265 
1266  trees = findTTreeSummaries(stdout)
1267  failed = cmpTreesDicts(trees_dict, trees, ignore)
1268  if failed:
1269  causes.append("trees summaries")
1270  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
1271  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
1272  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
1273 
1274  return causes
1275 
1276  def CheckHistosSummaries(self, stdout, result, causes,
1277  dict = None,
1278  ignore = None):
1279  """
1280  Compare the histogram summaries in stdout with the ones in 'dict' or in
1281  the reference file.
1282  The presence of histogram summaries when none is expected is not a
1283  failure.
1284  """
1285  if dict is None:
1286  reference = self._expandReferenceFileName(self.reference)
1287  # call the validator if the file exists
1288  if reference and os.path.isfile(reference):
1289  dict = findHistosSummaries(open(reference).read())
1290  else:
1291  dict = {}
1292 
1293  from pprint import PrettyPrinter
1294  pp = PrettyPrinter()
1295  if dict:
1296  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
1297  if ignore:
1298  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
1299 
1300  histos = findHistosSummaries(stdout)
1301  failed = cmpTreesDicts(dict, histos, ignore)
1302  if failed:
1303  causes.append("histos summaries")
1304  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
1305  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
1306  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
1307 
1308  return causes
1309 
1310  def ValidateWithReference(self, stdout, stderr, result, causes, preproc = None):
1311  """
1312  Default validation action: compare standard output and error to the
1313  reference files.
1314  """
1315  # set the default output preprocessor
1316  if preproc is None:
1317  preproc = normalizeExamples
1318  # check standard output
1319  reference = self._expandReferenceFileName(self.reference)
1320  # call the validator if the file exists
1321  if reference and os.path.isfile(reference):
1322  result["GaudiTest.output_reference"] = reference
1323  causes += ReferenceFileValidator(reference,
1324  "standard output",
1325  "GaudiTest.output_diff",
1326  preproc = preproc)(stdout, result)
1327 
1328  # Compare TTree summaries
1329  causes = self.CheckTTreesSummaries(stdout, result, causes)
1330  causes = self.CheckHistosSummaries(stdout, result, causes)
1331 
1332  if causes: # Write a new reference file for stdout
1333  try:
1334  newref = open(reference + ".new","w")
1335  # sanitize newlines
1336  for l in stdout.splitlines():
1337  newref.write(l.rstrip() + '\n')
1338  del newref # flush and close
1339  except IOError:
1340  # Ignore IO errors when trying to update reference files
1341  # because we may be in a read-only filesystem
1342  pass
1343 
1344  # check standard error
1345  reference = self._expandReferenceFileName(self.error_reference)
1346  # call the validator if we have a file to use
1347  if reference and os.path.isfile(reference):
1348  result["GaudiTest.error_reference"] = reference
1349  newcauses = ReferenceFileValidator(reference,
1350  "standard error",
1351  "GaudiTest.error_diff",
1352  preproc = preproc)(stderr, result)
1353  causes += newcauses
1354  if newcauses: # Write a new reference file for stderr
1355  newref = open(reference + ".new","w")
1356  # sanitize newlines
1357  for l in stderr.splitlines():
1358  newref.write(l.rstrip() + '\n')
1359  del newref # flush and close
1360  else:
1361  causes += BasicOutputValidator(self.stderr,
1362  "standard error",
1363  "ExecTest.expected_stderr")(stderr, result)
1364 
1365  return causes
1366 
1367  def ValidateOutput(self, stdout, stderr, result):
1368  causes = []
1369  # if the test definition contains a custom validator, use it
1370  if self.validator.strip() != "":
1371  class CallWrapper(object):
1372  """
1373  Small wrapper class to dynamically bind some default arguments
1374  to a callable.
1375  """
1376  def __init__(self, callable, extra_args = {}):
1377  self.callable = callable
1378  self.extra_args = extra_args
1379  # get the list of names of positional arguments
1380  from inspect import getargspec
1381  self.args_order = getargspec(callable)[0]
1382  # Remove "self" from the list of positional arguments
1383  # since it is added automatically
1384  if self.args_order[0] == "self":
1385  del self.args_order[0]
1386  def __call__(self, *args, **kwargs):
1387  # Check which positional arguments are used
1388  positional = self.args_order[:len(args)]
1389 
1390  kwargs = dict(kwargs) # copy the arguments dictionary
1391  for a in self.extra_args:
1392  # use "extra_args" for the arguments not specified as
1393  # positional or keyword
1394  if a not in positional and a not in kwargs:
1395  kwargs[a] = self.extra_args[a]
1396  return apply(self.callable, args, kwargs)
1397  # local names to be exposed in the script
1398  exported_symbols = {"self":self,
1399  "stdout":stdout,
1400  "stderr":stderr,
1401  "result":result,
1402  "causes":causes,
1403  "findReferenceBlock":
1404  CallWrapper(findReferenceBlock, {"stdout":stdout,
1405  "result":result,
1406  "causes":causes}),
1407  "validateWithReference":
1408  CallWrapper(self.ValidateWithReference, {"stdout":stdout,
1409  "stderr":stderr,
1410  "result":result,
1411  "causes":causes}),
1412  "countErrorLines":
1413  CallWrapper(countErrorLines, {"stdout":stdout,
1414  "result":result,
1415  "causes":causes}),
1416  "checkTTreesSummaries":
1417  CallWrapper(self.CheckTTreesSummaries, {"stdout":stdout,
1418  "result":result,
1419  "causes":causes}),
1420  "checkHistosSummaries":
1421  CallWrapper(self.CheckHistosSummaries, {"stdout":stdout,
1422  "result":result,
1423  "causes":causes}),
1424 
1425  }
1426  exec self.validator in globals(), exported_symbols
1427  else:
1428  self.ValidateWithReference(stdout, stderr, result, causes)
1429 
1430  return causes
1431 
1432  def DumpEnvironment(self, result):
1433  """
1434  Add the content of the environment to the result object.
1435 
1436  Copied from the QMTest class of COOL.
1437  """
1438  vars = os.environ.keys()
1439  vars.sort()
1440  result['GaudiTest.environment'] = \
1441  result.Quote('\n'.join(["%s=%s"%(v,os.environ[v]) for v in vars]))
1442 
1443  def Run(self, context, result):
1444  """Run the test.
1445 
1446  'context' -- A 'Context' giving run-time parameters to the
1447  test.
1448 
1449  'result' -- A 'Result' object. The outcome will be
1450  'Result.PASS' when this method is called. The 'result' may be
1451  modified by this method to indicate outcomes other than
1452  'Result.PASS' or to add annotations."""
1453 
1454  # Check if the platform is supported
1455  if self.PlatformIsNotSupported(context, result):
1456  return
1457 
1458  # Prepare program name and arguments (expanding variables, and converting to absolute)
1459  if self.program:
1460  prog = rationalizepath(self.program)
1461  elif "GAUDIEXE" in os.environ:
1462  prog = os.environ["GAUDIEXE"]
1463  else:
1464  prog = "Gaudi.exe"
1465  self.program = prog
1466 
1467  dummy, prog_ext = os.path.splitext(prog)
1468  if prog_ext not in [ ".exe", ".py", ".bat" ] and self.isWinPlatform():
1469  prog += ".exe"
1470  prog_ext = ".exe"
1471 
1472  prog = which(prog) or prog
1473 
1474  # Convert paths to absolute paths in arguments and reference files
1475  args = map(rationalizepath, self.args)
1476  self.reference = rationalizepath(self.reference)
1477  self.error_reference = rationalizepath(self.error_reference)
1478 
1479 
1480  # check if the user provided inline options
1481  tmpfile = None
1482  if self.options.strip():
1483  ext = ".opts"
1484  if re.search(r"from\s+Gaudi.Configuration\s+import\s+\*|from\s+Configurables\s+import", self.options):
1485  ext = ".py"
1486  tmpfile = TempFile(ext)
1487  tmpfile.writelines("\n".join(self.options.splitlines()))
1488  tmpfile.flush()
1489  args.append(tmpfile.name)
1490  result["GaudiTest.options"] = result.Quote(self.options)
1491 
1492  # if the program is a python file, execute it through python
1493  if prog_ext == ".py":
1494  args.insert(0,prog)
1495  if self.isWinPlatform():
1496  prog = which("python.exe") or "python.exe"
1497  else:
1498  prog = which("python") or "python"
1499 
1500  # Change to the working directory if specified, or to the default temporary one
1501  origdir = os.getcwd()
1502  if self.workdir:
1503  os.chdir(str(os.path.normpath(os.path.expandvars(self.workdir))))
1504  elif self.use_temp_dir == "true":
1505  if "QMTEST_TMPDIR" in os.environ:
1506  qmtest_tmpdir = os.environ["QMTEST_TMPDIR"]
1507  if not os.path.exists(qmtest_tmpdir):
1508  os.makedirs(qmtest_tmpdir)
1509  os.chdir(qmtest_tmpdir)
1510  elif "qmtest.tmpdir" in context:
1511  os.chdir(context["qmtest.tmpdir"])
1512 
1513  if "QMTEST_IGNORE_TIMEOUT" not in os.environ:
1514  self.timeout = max(self.timeout,600)
1515  else:
1516  self.timeout = -1
1517 
1518  try:
1519  # Generate eclipse.org debug launcher for the test
1520  self._CreateEclipseLaunch(prog, args, destdir = os.path.join(origdir, '.eclipse'))
1521  # Run the test
1522  self.RunProgram(prog,
1523  [ prog ] + args,
1524  context, result)
1525  # Record the content of the environment for failing tests
1526  if result.GetOutcome() not in [ result.PASS ]:
1527  self.DumpEnvironment(result)
1528  finally:
1529  # revert to the original directory
1530  os.chdir(origdir)
1531 
1532  def RunProgram(self, program, arguments, context, result):
1533  """Run the 'program'.
1534 
1535  'program' -- The path to the program to run.
1536 
1537  'arguments' -- A list of the arguments to the program. This
1538  list must contain a first argument corresponding to 'argv[0]'.
1539 
1540  'context' -- A 'Context' giving run-time parameters to the
1541  test.
1542 
1543  'result' -- A 'Result' object. The outcome will be
1544  'Result.PASS' when this method is called. The 'result' may be
1545  modified by this method to indicate outcomes other than
1546  'Result.PASS' or to add annotations.
1547 
1548  @attention: This method has been copied from command.ExecTestBase
1549  (QMTest 2.3.0) and modified to keep stdout and stderr
1550  for tests that have been terminated by a signal.
1551  (Fundamental for debugging in the Application Area)
1552  """
1553 
1554  # Construct the environment.
1555  environment = self.MakeEnvironment(context)
1556  # FIXME: without this, we get some spurious '\x1b[?1034' in the stdout on SLC6
1557  if "slc6" in environment.get('CMTCONFIG', ''):
1558  environment['TERM'] = 'dumb'
1559  # Create the executable.
1560  if self.timeout >= 0:
1561  timeout = self.timeout
1562  else:
1563  # If no timeout was specified, we still run this process in a
1564  # separate process group and kill the entire process group
1565  # when the child is done executing. That means that
1566  # orphaned child processes created by the test will be
1567  # cleaned up.
1568  timeout = -2
1569  e = GaudiFilterExecutable(self.stdin, timeout)
1570  # Run it.
1571  exit_status = e.Run(arguments, environment, path = program)
1572  # Get the stack trace from the temporary file (if present)
1573  if e.stack_trace_file and os.path.exists(e.stack_trace_file):
1574  stack_trace = open(e.stack_trace_file).read()
1575  os.remove(e.stack_trace_file)
1576  else:
1577  stack_trace = None
1578  if stack_trace:
1579  result["ExecTest.stack_trace"] = result.Quote(stack_trace)
1580 
1581  # If the process terminated normally, check the outputs.
1582  if (sys.platform == "win32" or os.WIFEXITED(exit_status)
1583  or self.signal == os.WTERMSIG(exit_status)):
1584  # There are no causes of failure yet.
1585  causes = []
1586  # The target program terminated normally. Extract the
1587  # exit code, if this test checks it.
1588  if self.exit_code is None:
1589  exit_code = None
1590  elif sys.platform == "win32":
1591  exit_code = exit_status
1592  else:
1593  exit_code = os.WEXITSTATUS(exit_status)
1594  # Get the output generated by the program.
1595  stdout = e.stdout
1596  stderr = e.stderr
1597  # Record the results.
1598  result["ExecTest.exit_code"] = str(exit_code)
1599  result["ExecTest.stdout"] = result.Quote(stdout)
1600  result["ExecTest.stderr"] = result.Quote(stderr)
1601  # Check to see if the exit code matches.
1602  if exit_code != self.exit_code:
1603  causes.append("exit_code")
1604  result["ExecTest.expected_exit_code"] \
1605  = str(self.exit_code)
1606  # Validate the output.
1607  causes += self.ValidateOutput(stdout, stderr, result)
1608  # If anything went wrong, the test failed.
1609  if causes:
1610  result.Fail("Unexpected %s." % string.join(causes, ", "))
1611  elif os.WIFSIGNALED(exit_status):
1612  # The target program terminated with a signal. Construe
1613  # that as a test failure.
1614  signal_number = str(os.WTERMSIG(exit_status))
1615  if not stack_trace:
1616  result.Fail("Program terminated by signal.")
1617  else:
1618  # The presence of stack_trace means that we stopped the job because
1619  # of a time-out
1620  result.Fail("Exceeded time limit (%ds), terminated." % timeout)
1621  result["ExecTest.signal_number"] = signal_number
1622  result["ExecTest.stdout"] = result.Quote(e.stdout)
1623  result["ExecTest.stderr"] = result.Quote(e.stderr)
1624  if self.signal:
1625  result["ExecTest.expected_signal_number"] = str(self.signal)
1626  elif os.WIFSTOPPED(exit_status):
1627  # The target program was stopped. Construe that as a
1628  # test failure.
1629  signal_number = str(os.WSTOPSIG(exit_status))
1630  if not stack_trace:
1631  result.Fail("Program stopped by signal.")
1632  else:
1633  # The presence of stack_trace means that we stopped the job because
1634  # of a time-out
1635  result.Fail("Exceeded time limit (%ds), stopped." % timeout)
1636  result["ExecTest.signal_number"] = signal_number
1637  result["ExecTest.stdout"] = result.Quote(e.stdout)
1638  result["ExecTest.stderr"] = result.Quote(e.stderr)
1639  else:
1640  # The target program terminated abnormally in some other
1641  # manner. (This shouldn't normally happen...)
1642  result.Fail("Program did not terminate normally.")
1643 
1644  # Marco Cl.: This is a special trick to fix a "problem" with the output
1645  # of Gaudi jobs when they use colors
1646  esc = '\x1b'
1647  repr_esc = '\\x1b'
1648  result["ExecTest.stdout"] = result["ExecTest.stdout"].replace(esc,repr_esc)
1649  # TODO: (MCl) improve the hack for colors in standard output
1650  # maybe converting them to HTML tags (see the sketch below)
1651 
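# --- Illustrative sketch, not part of the original file --------------------
# A minimal example of the TODO above: instead of merely escaping the ESC
# character, the ANSI SGR colour sequences in the captured stdout could be
# converted into HTML <span> tags. The function name and the (deliberately
# tiny) colour map are assumptions made for illustration only.
import re

_SGR_RE = re.compile(r'\x1b\[([0-9;]*)m')
_SGR_COLOURS = {'31': 'red', '32': 'green', '33': 'orange',
                '34': 'blue', '35': 'magenta', '36': 'cyan'}

def ansi_to_html(text):
    """Replace ANSI colour sequences with <span> tags (a reset closes the span)."""
    out, last, open_span = [], 0, False
    for m in _SGR_RE.finditer(text):
        out.append(text[last:m.start()])
        last = m.end()
        if open_span:
            out.append('</span>')
            open_span = False
        colour = _SGR_COLOURS.get(m.group(1))
        if colour:
            out.append('<span style="color:%s">' % colour)
            open_span = True
    out.append(text[last:])
    if open_span:
        out.append('</span>')
    return ''.join(out)
# ---------------------------------------------------------------------------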
1652  def _CreateEclipseLaunch(self, prog, args, destdir = None):
1653  # Find the project name used in Eclipse.
1654  # The name is in a file called ".project" in one of the parent directories
1655  projbasedir = os.path.normpath(destdir)
1656  while not os.path.exists(os.path.join(projbasedir, ".project")):
1657  oldprojdir = projbasedir
1658  projbasedir = os.path.normpath(os.path.join(projbasedir, os.pardir))
1659  # FIXME: the root level is invariant when trying to go up one level,
1660  # but it must be checked on Windows
1661  if oldprojdir == projbasedir:
1662  # If we cannot find a .project file, there is no point in creating a .launch file
1663  return
1664  # Ensure that we have a place to write to.
1665  if not os.path.exists(destdir):
1666  os.makedirs(destdir)
1667  # Use ElementTree to parse the XML file
1668  from xml.etree import ElementTree as ET
1669  t = ET.parse(os.path.join(projbasedir, ".project"))
1670  projectName = t.find("name").text
1671 
1672  # prepare the name/path of the generated file
1673  destfile = "%s.launch" % self._Runnable__id
1674  if destdir:
1675  destfile = os.path.join(destdir, destfile)
1676 
1677  if self.options.strip():
1678  # this means we have some custom options in the qmt file, so we have
1679  # to copy them from the temporary file at the end of the arguments
1680  # into another file
1681  tempfile = args.pop()
1682  optsfile = destfile + os.path.splitext(tempfile)[1]
1683  shutil.copyfile(tempfile, optsfile)
1684  args.append(optsfile)
1685 
1686  # prepare the data to insert in the XML file
1687  from xml.sax.saxutils import quoteattr # useful to quote XML special chars
1688  data = {}
1689  # Note: the "quoteattr(k)" is not needed because special chars cannot be part of a variable name,
1690  # but it does no harm.
1691  data["environment"] = "\n".join(['<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
1692  for k, v in os.environ.iteritems()
1693  if k not in ('MAKEOVERRIDES', 'MAKEFLAGS', 'MAKELEVEL')])
1694 
1695  data["exec"] = which(prog) or prog
1696  if os.path.basename(data["exec"]).lower().startswith("python"):
1697  data["stopAtMain"] = "false" # do not stop at main when debugging Python scripts
1698  else:
1699  data["stopAtMain"] = "true"
1700 
1701  data["args"] = "&#10;".join(map(rationalizepath, args))
1702  if self.isWinPlatform():
1703  data["args"] = "&#10;".join(["/debugexe"] + map(rationalizepath, [data["exec"]] + args))
1704  data["exec"] = which("vcexpress.exe")
1705 
1706  if not self.use_temp_dir:
1707  data["workdir"] = os.getcwd()
1708  else:
1709  # If the test is using a temporary directory, it is better to run it
1710  # in the same directory as the .launch file when debugged in Eclipse
1711  data["workdir"] = destdir
1712 
1713  data["project"] = projectName.strip()
1714 
1715  # Template for the XML file, based on Eclipse 3.4
1716  xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
1717 <launchConfiguration type="org.eclipse.cdt.launch.applicationLaunchType">
1718 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB" value="true"/>
1719 <listAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB_LIST"/>
1720 <stringAttribute key="org.eclipse.cdt.debug.mi.core.DEBUG_NAME" value="gdb"/>
1721 <stringAttribute key="org.eclipse.cdt.debug.mi.core.GDB_INIT" value=".gdbinit"/>
1722 <listAttribute key="org.eclipse.cdt.debug.mi.core.SOLIB_PATH"/>
1723 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.STOP_ON_SOLIB_EVENTS" value="false"/>
1724 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.breakpointsFullPath" value="false"/>
1725 <stringAttribute key="org.eclipse.cdt.debug.mi.core.commandFactory" value="org.eclipse.cdt.debug.mi.core.standardCommandFactory"/>
1726 <stringAttribute key="org.eclipse.cdt.debug.mi.core.protocol" value="mi"/>
1727 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.verboseMode" value="false"/>
1728 <intAttribute key="org.eclipse.cdt.launch.ATTR_BUILD_BEFORE_LAUNCH_ATTR" value="0"/>
1729 <stringAttribute key="org.eclipse.cdt.launch.COREFILE_PATH" value=""/>
1730 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_ID" value="org.eclipse.cdt.debug.mi.core.CDebuggerNew"/>
1731 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_REGISTER_GROUPS" value=""/>
1732 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_START_MODE" value="run"/>
1733 <booleanAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN" value="%(stopAtMain)s"/>
1734 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN_SYMBOL" value="main"/>
1735 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_REGISTER_BOOKKEEPING" value="false"/>
1736 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_VARIABLE_BOOKKEEPING" value="false"/>
1737 <stringAttribute key="org.eclipse.cdt.launch.FORMAT" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&lt;contentList/&gt;"/>
1738 <stringAttribute key="org.eclipse.cdt.launch.GLOBAL_VARIABLES" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;globalVariableList/&gt;&#10;"/>
1739 <stringAttribute key="org.eclipse.cdt.launch.MEMORY_BLOCKS" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;memoryBlockExpressionList/&gt;&#10;"/>
1740 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_ARGUMENTS" value="%(args)s"/>
1741 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_NAME" value="%(exec)s"/>
1742 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_ATTR" value="%(project)s"/>
1743 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_BUILD_CONFIG_ID_ATTR" value=""/>
1744 <stringAttribute key="org.eclipse.cdt.launch.WORKING_DIRECTORY" value="%(workdir)s"/>
1745 <booleanAttribute key="org.eclipse.cdt.launch.ui.ApplicationCDebuggerTab.DEFAULTS_SET" value="true"/>
1746 <booleanAttribute key="org.eclipse.cdt.launch.use_terminal" value="true"/>
1747 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
1748 <listEntry value="/%(project)s"/>
1749 </listAttribute>
1750 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
1751 <listEntry value="4"/>
1752 </listAttribute>
1753 <booleanAttribute key="org.eclipse.debug.core.appendEnvironmentVariables" value="false"/>
1754 <mapAttribute key="org.eclipse.debug.core.environmentVariables">
1755 %(environment)s
1756 </mapAttribute>
1757 <mapAttribute key="org.eclipse.debug.core.preferred_launchers">
1758 <mapEntry key="[debug]" value="org.eclipse.cdt.cdi.launch.localCLaunch"/>
1759 </mapAttribute>
1760 <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
1761 <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
1762 </listAttribute>
1763 </launchConfiguration>
1764 """ % data
1765 
1766  # Write the output file
1767  open(destfile, "w").write(xml)
1768  #open(destfile + "_copy.xml", "w").write(xml)
1769 
1770 
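# --- Illustrative sketch, not part of the original file --------------------
# A standalone example of the process-group trick described in RunProgram
# above: the child is started as the leader of its own process group so that,
# once it has finished, the whole group can be signalled and any orphaned
# grandchildren left behind by the test are cleaned up. Unix only; the
# function name is an assumption made for illustration.
import os
import signal
import subprocess

def run_in_own_process_group(cmd):
    """Run 'cmd' and kill whatever is left in its process group afterwards."""
    proc = subprocess.Popen(cmd, preexec_fn=os.setpgrp)  # child becomes group leader
    proc.wait()
    try:
        # the child's pid is also its process-group id
        os.killpg(proc.pid, signal.SIGKILL)
    except OSError:
        pass  # the group has already disappeared
    return proc.returncode
# ---------------------------------------------------------------------------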
1771 try:
1772  import json
1773 except ImportError:
1774  # Use simplejson for LCG
1775  import simplejson as json
1776 
1777 class HTMLResultStream(ResultStream):
1778  """An 'HTMLResultStream' writes its output to a set of HTML files.
1779 
1780  The argument 'dir' is used to select the destination directory for the HTML
1781  report.
1782  The destination directory may already contain the report from a previous run
1783  (for example of a different package), in which case it will be extended to
1784  include the new data.
1785  """
1786  arguments = [
1787  qm.fields.TextField(
1788  name = "dir",
1789  title = "Destination Directory",
1790  description = """The name of the directory.
1791 
1792  All results will be written to the directory indicated.""",
1793  verbatim = "true",
1794  default_value = ""),
1795  ]
1796 
1797  def __init__(self, arguments = None, **args):
1798  """Prepare the destination directory.
1799 
1800  Creates the destination directory and stores in it some preliminary
1801  annotations and the static files found in the template directory
1802  'html_report'.
1803  """
1804  ResultStream.__init__(self, arguments, **args)
1805  self._summary = []
1806  self._summaryFile = os.path.join(self.dir, "summary.json")
1807  self._annotationsFile = os.path.join(self.dir, "annotations.json")
1808  # Prepare the destination directory using the template
1809  templateDir = os.path.join(os.path.dirname(__file__), "html_report")
1810  if not os.path.isdir(self.dir):
1811  os.makedirs(self.dir)
1812  # Copy the files in the template directory excluding the directories
1813  for f in os.listdir(templateDir):
1814  src = os.path.join(templateDir, f)
1815  dst = os.path.join(self.dir, f)
1816  if not os.path.isdir(src) and not os.path.exists(dst):
1817  shutil.copy(src, dst)
1818  # Add some non-QMTest attributes
1819  if "CMTCONFIG" in os.environ:
1820  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
1821  import socket
1822  self.WriteAnnotation("hostname", socket.gethostname())
1823 
1824  def _updateSummary(self):
1825  """Helper function to extend the global summary file in the destination
1826  directory.
1827  """
1828  if os.path.exists(self._summaryFile):
1829  oldSummary = json.load(open(self._summaryFile))
1830  else:
1831  oldSummary = []
1832  ids = set([ i["id"] for i in self._summary ])
1833  newSummary = [ i for i in oldSummary if i["id"] not in ids ]
1834  newSummary.extend(self._summary)
1835  json.dump(newSummary, open(self._summaryFile, "w"),
1836  sort_keys = True)
1837 
1838  def WriteAnnotation(self, key, value):
1839  """Writes the annotation to the annotation file.
1840  If the key is already present with a different value, the value becomes
1841  a list and the new value is appended to it, except for start_time and
1842  end_time.
1843  """
1844  # Initialize the annotation dict from the file (if present)
1845  if os.path.exists(self._annotationsFile):
1846  annotations = json.load(open(self._annotationsFile))
1847  else:
1848  annotations = {}
1849  # hack because we do not have proper JSON support
1850  key, value = map(str, [key, value])
1851  if key == "qmtest.run.start_time":
1852  # Special handling of the start time:
1853  # if we are updating a result, we have to keep the original start
1854  # time, but remove the original end time to mark the report to be
1855  # in progress.
1856  if key not in annotations:
1857  annotations[key] = value
1858  if "qmtest.run.end_time" in annotations:
1859  del annotations["qmtest.run.end_time"]
1860  else:
1861  # All other annotations are added to a list
1862  if key in annotations:
1863  old = annotations[key]
1864  if type(old) is list:
1865  if value not in old:
1866  annotations[key].append(value)
1867  elif value != old:
1868  annotations[key] = [old, value]
1869  else:
1870  annotations[key] = value
1871  # Write the new annotations file
1872  json.dump(annotations, open(self._annotationsFile, "w"),
1873  sort_keys = True)
1874 
1875  def WriteResult(self, result):
1876  """Prepare the test result directory in the destination directory storing
1877  into it the result fields.
1878  A summary of the test result is stored both in a file in the test directory
1879  and in the global summary file.
1880  """
1881  summary = {}
1882  summary["id"] = result.GetId()
1883  summary["outcome"] = result.GetOutcome()
1884  summary["cause"] = result.GetCause()
1885  summary["fields"] = result.keys()
1886  summary["fields"].sort()
1887 
1888  # Since we lack proper JSON support, I hack a bit
1889  for f in ["id", "outcome", "cause"]:
1890  summary[f] = str(summary[f])
1891  summary["fields"] = map(str, summary["fields"])
1892 
1893  self._summary.append(summary)
1894 
1895  # format:
1896  # testname/summary.json
1897  # testname/field1
1898  # testname/field2
1899  testOutDir = os.path.join(self.dir, summary["id"])
1900  if not os.path.isdir(testOutDir):
1901  os.makedirs(testOutDir)
1902  json.dump(summary, open(os.path.join(testOutDir, "summary.json"), "w"),
1903  sort_keys = True)
1904  for f in summary["fields"]:
1905  open(os.path.join(testOutDir, f), "w").write(result[f])
1906 
1907  self._updateSummary()
1908 
1909  def Summarize(self):
1910  # Not implemented.
1911  pass
1912 
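# --- Illustrative sketch, not part of the original file --------------------
# A minimal example of how the merged summary.json written by
# HTMLResultStream._updateSummary can be consumed, e.g. to list the tests
# that did not pass. 'report_dir' and the function name are assumptions; the
# entry keys ('id', 'outcome', 'cause') are the ones stored by WriteResult.
import json
import os

def list_failures(report_dir):
    """Print the id, outcome and cause of every test whose outcome is not PASS."""
    summary = json.load(open(os.path.join(report_dir, 'summary.json')))
    for entry in summary:
        if entry['outcome'] != 'PASS':
            print('%s: %s (%s)' % (entry['id'], entry['outcome'], entry['cause']))
# ---------------------------------------------------------------------------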
1913 
1914 
1915 
1916 class XMLResultStream(ResultStream):
1917  """An 'XMLResultStream' writes its output to a Ctest XML file.
1918 
1919  The argument 'dir' is used to select the destination file for the XML
1920  report.
1921  The destination directory may already contain the report from a previous run
1922  (for example of a different package), in which case it will be overrided to
1923  with the new data.
1924  """
1925  arguments = [
1926  qm.fields.TextField(
1927  name = "dir",
1928  title = "Destination Directory",
1929  description = """The name of the directory.
1930 
1931  All results will be written to the directory indicated.""",
1932  verbatim = "true",
1933  default_value = ""),
1934  qm.fields.TextField(
1935  name = "prefix",
1936  title = "Output File Prefix",
1937  description = """The output file name will be the specified prefix
1938  followed by 'Test.xml' (CTest convention).""",
1939  verbatim = "true",
1940  default_value = ""),
1941  ]
1942 
1943  def __init__(self, arguments = None, **args):
1944  """Prepare the destination directory.
1945 
1946  Creates the destination directory and stores in it some preliminary
1947  annotations.
1948  """
1949  ResultStream.__init__(self, arguments, **args)
1950 
1951  self._xmlFile = os.path.join(self.dir, self.prefix + 'Test.xml')
1952 
1953  # add some global variables
1954  self._startTime = None
1955  self._endTime = None
1956  # Initialize the XML file if it does not exist
1957  if not os.path.isfile(self._xmlFile):
1958  # check that the containing directory exists and create it if not
1959  if not os.path.exists(os.path.dirname(self._xmlFile)):
1960  os.makedirs(os.path.dirname(self._xmlFile))
1961 
1962  newdataset = ET.Element("newdataset")
1963  self._tree = ET.ElementTree(newdataset)
1964  self._tree.write(self._xmlFile)
1965  else :
1966  # Read the xml file
1967  self._tree = ET.parse(self._xmlFile)
1968  newdataset = self._tree.getroot()
1969 
1970  # Find the corresponding site; if it does not exist, create it
1971 
1972  #site = newdataset.find('Site[@BuildStamp="'+result["qmtest.start_time"]+'"][@OSPlatform="'+os.getenv("CMTOPT")+'"]')
1973  # This syntax does not work here, probably because of the Python version
1974  # (it works in the Python terminal), so we use an explicit loop instead:
1975  for site in newdataset.getiterator() :
1976  if site.get("OSPlatform") == os.uname()[4]: # and site.get("BuildStamp") == result["qmtest.start_time"] and:
1977  # Here we could compare more attributes to distinguish between two sites
1978  self._site = site
1979  break
1980  else :
1981  site = None
1982 
1983 
1984  if site is None :
1985  import socket
1986  import multiprocessing
1987  attrib = {
1988  "BuildName": os.getenv("CMTCONFIG"),
1989  "Name": os.uname()[1],
1990  "Generator": "QMTest " + qm.version,
1991  "OSName": os.uname()[0],
1992  "Hostname": socket.gethostname(),
1993  "OSRelease": os.uname()[2],
1994  "OSVersion": os.uname()[3],
1995  "OSPlatform": os.uname()[4],
1996  "Is64Bits": "unknown",
1997  "VendorString": "unknown",
1998  "VendorID": "unknown",
1999  "FamilyID": "unknown",
2000  "ModelID": "unknown",
2001  "ProcessorCacheSize": "unknown",
2002  "NumberOfLogicalCPU": str(multiprocessing.cpu_count()),
2003  "NumberOfPhysicalCPU": "0",
2004  "TotalVirtualMemory": "0",
2005  "TotalPhysicalMemory": "0",
2006  "LogicalProcessorsPerPhysical": "0",
2007  "ProcessorClockFrequency": "0",
2008  }
2009  self._site = ET.SubElement(newdataset, "site", attrib)
2010  self._Testing = ET.SubElement(self._site,"Testing")
2011 
2012  # Start time elements
2013  self._StartDateTime = ET.SubElement(self._Testing, "StartDateTime")
2014 
2015  self._StartTestTime = ET.SubElement(self._Testing, "StartTestTime")
2016 
2017 
2018  self._TestList = ET.SubElement(self._Testing, "TestList")
2019 
2020  ## End time elements
2021  self._EndDateTime = ET.SubElement(self._Testing, "EndDateTime")
2022 
2023 
2024  self._EndTestTime = ET.SubElement(self._Testing, "EndTestTime")
2025 
2026 
2027 
2028  self._ElapsedMinutes = ET.SubElement(self._Testing, "ElapsedMinutes")
2029 
2030 
2031  else : # the site already exists: retrieve the existing elements
2032  self._Testing = self._site.find("Testing")
2033  self._StartDateTime = self._Testing.find("StartDateTime")
2034  self._StartTestTime = self._Testing.find("StartTestTime")
2035  self._TestList = self._Testing.find("TestList")
2036  self._EndDateTime = self._Testing.find("EndDateTime")
2037  self._EndTestTime = self._Testing.find("EndTestTime")
2038  self._ElapsedMinutes = self._Testing.find("ElapsedMinutes")
2039 
2040  """
2041  # Add some non-QMTest attributes
2042  if "CMTCONFIG" in os.environ:
2043  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
2044  import socket
2045  self.WriteAnnotation("hostname", socket.gethostname())
2046  """
2047 
2048 
2049  def WriteAnnotation(self, key, value):
2050  if key == "qmtest.run.start_time":
2051  if self._site.get("qmtest.run.start_time") is not None :
2052  return None
2053  self._site.set(str(key),str(value))
2054  def WriteResult(self, result):
2055  """Prepare the test result directory in the destination directory storing
2056  into it the result fields.
2057  A summary of the test result is stored both in a file in the test directory
2058  and in the global summary file.
2059  """
2060  summary = {}
2061  summary["id"] = result.GetId()
2062  summary["outcome"] = result.GetOutcome()
2063  summary["cause"] = result.GetCause()
2064  summary["fields"] = result.keys()
2065  summary["fields"].sort()
2066 
2067 
2068  # Since we lack proper JSON support, I hack a bit
2069  for f in ["id", "outcome", "cause"]:
2070  summary[f] = str(summary[f])
2071  summary["fields"] = map(str, summary["fields"])
2072 
2073 
2074  # format
2075  # package_Test.xml
2076 
2077  if "qmtest.start_time" in summary["fields"]:
2078  haveStartDate = True
2079  else :
2080  haveStartDate = False
2081  if "qmtest.end_time" in summary["fields"]:
2082  haveEndDate = True
2083  else :
2084  haveEndDate = False
2085 
2086  # writing the start date time
2087  if haveStartDate:
2088  self._startTime = calendar.timegm(time.strptime(result["qmtest.start_time"], "%Y-%m-%dT%H:%M:%SZ"))
2089  if self._StartTestTime.text is None:
2090  self._StartDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._startTime))
2091  self._StartTestTime.text = str(self._startTime)
2092  self._site.set("BuildStamp" , result["qmtest.start_time"] )
2093 
2094  #Save the end date time in memory
2095  if haveEndDate:
2096  self._endTime = calendar.timegm(time.strptime(result["qmtest.end_time"], "%Y-%m-%dT%H:%M:%SZ"))
2097 
2098 
2099  #add the current test to the test list
2100  tl = ET.Element("Test")
2101  tl.text = summary["id"]
2102  self._TestList.insert(0,tl)
2103 
2104  #Fill the current test
2105  Test = ET.Element("Test")
2106  if summary["outcome"] == "PASS":
2107  Test.set("Status", "passed")
2108  elif summary["outcome"] == "FAIL":
2109  Test.set("Status", "failed")
2110  elif summary["outcome"] == "SKIPPED" or summary["outcome"] == "UNTESTED":
2111  Test.set("Status", "skipped")
2112  elif summary["outcome"] == "ERROR":
2113  Test.set("Status", "failed")
2114  Name = ET.SubElement(Test, "Name")
2115  Name.text = summary["id"]
2116  Results = ET.SubElement(Test, "Results")
2117 
2118  # add the test after the other tests
2119  self._Testing.insert(3,Test)
2120 
2121  if haveStartDate and haveEndDate:
2122  # Compute the test duration
2123  delta = self._endTime - self._startTime
2124  testduration = str(delta)
2125  Testduration= ET.SubElement(Results,"NamedMeasurement")
2126  Testduration.set("name","Execution Time")
2127  Testduration.set("type","numeric/float" )
2128  value = ET.SubElement(Testduration, "Value")
2129  value.text = testduration
2130 
2131  #remove the fields that we store in a different way
2132  for n in ("qmtest.end_time", "qmtest.start_time", "qmtest.cause", "ExecTest.stdout"):
2133  if n in summary["fields"]:
2134  summary["fields"].remove(n)
2135 
2136  # Here we can add some NamedMeasurement elements whose type we know
2137  #
2138  if "ExecTest.exit_code" in summary["fields"] :
2139  summary["fields"].remove("ExecTest.exit_code")
2140  ExitCode= ET.SubElement(Results,"NamedMeasurement")
2141  ExitCode.set("name","exit_code")
2142  ExitCode.set("type","numeric/integer" )
2143  value = ET.SubElement(ExitCode, "Value")
2144  value.text = convert_xml_illegal_chars(result["ExecTest.exit_code"])
2145 
2146  TestStartTime= ET.SubElement(Results,"NamedMeasurement")
2147  TestStartTime.set("name","Start_Time")
2148  TestStartTime.set("type","String" )
2149  value = ET.SubElement(TestStartTime, "Value")
2150  if haveStartDate :
2151  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._startTime)))
2152  else :
2153  value.text = ""
2154 
2155  TestEndTime= ET.SubElement(Results,"NamedMeasurement")
2156  TestEndTime.set("name","End_Time")
2157  TestEndTime.set("type","String" )
2158  value = ET.SubElement(TestEndTime, "Value")
2159  if haveEndDate :
2160  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._endTime)))
2161  else :
2162  value.text = ""
2163 
2164  if summary["cause"]:
2165  FailureCause= ET.SubElement(Results,"NamedMeasurement")
2166  FailureCause.set("name", "Cause")
2167  FailureCause.set("type", "String" )
2168  value = ET.SubElement(FailureCause, "Value")
2169  value.text = escape_xml_illegal_chars(summary["cause"])
2170 
2171  #Fill the result
2172  fields = {}
2173  for field in summary["fields"] :
2174  fields[field] = ET.SubElement(Results, "NamedMeasurement")
2175  fields[field].set("type","String")
2176  fields[field].set("name",field)
2177  value = ET.SubElement(fields[field], "Value")
2178  # strip the enclosing <pre></pre> tags
2179  if "<pre>" in result[field][0:6] :
2180  value.text = convert_xml_illegal_chars(result[field][5:-6])
2181  else :
2182  value.text = convert_xml_illegal_chars(result[field])
2183 
2184 
2185  if result.has_key("ExecTest.stdout"): # i.e. "ExecTest.stdout" in result
2186  Measurement = ET.SubElement(Results, "Measurement")
2187  value = ET.SubElement(Measurement, "Value")
2188  if "<pre>" in result["ExecTest.stdout"][0:6] :
2189  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"][5:-6])
2190  else :
2191  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"])
2192 
2193 
2194  # write the file
2195  self._tree.write(self._xmlFile, "utf-8") # in Python 2.7, pass xml_declaration=True to add the XML header
2196 
2197 
2198  def Summarize(self):
2199 
2200  # Set the final end date time
2201  self._EndTestTime.text = str(self._endTime)
2202  self._EndDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._endTime))
2203 
2204  # Compute the total duration
2205  if self._endTime and self._startTime:
2206  delta = self._endTime - self._startTime
2207  else:
2208  delta = 0
2209  self._ElapsedMinutes.text = str(delta/60)
2210 
2211  # Write into the file
2212  self._tree.write(self._xmlFile, "utf-8") # in Python 2.7, pass xml_declaration=True to add the XML header
2213 
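# --- Illustrative sketch, not part of the original file --------------------
# A minimal example of how the <prefix>Test.xml file written by
# XMLResultStream can be inspected afterwards: every <Test> element created in
# WriteResult carries a 'Status' attribute and a <Name> child. The default
# file name below is an assumption; the real name is the configured prefix
# followed by 'Test.xml'.
import xml.etree.ElementTree as ET

def print_statuses(xml_file='Test.xml'):
    """Print the name and status of every test recorded in the CTest-style report."""
    tree = ET.parse(xml_file)
    for test in tree.getroot().getiterator('Test'):
        name = test.find('Name')
        if name is not None:  # skip the bare <Test> entries inside <TestList>
            print('%-60s %s' % (name.text, test.get('Status')))
# ---------------------------------------------------------------------------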
