GaudiTest.py
1 ########################################################################
2 # File: GaudiTest.py
3 # Author: Marco Clemencic CERN/PH-LBC
4 ########################################################################
5 __author__ = 'Marco Clemencic CERN/PH-LBC'
6 ########################################################################
7 # Imports
8 ########################################################################
9 import os
10 import sys
11 import re
12 import tempfile
13 import shutil
14 import string
15 import difflib
16 import time
17 import calendar
18 import codecs
19 
20 from subprocess import Popen, PIPE, STDOUT
21 
22 try:
23  from GaudiKernel import ROOT6WorkAroundEnabled
24 except ImportError:
25  def ROOT6WorkAroundEnabled(id=None):
26  # dummy implementation
27  return False
28 
29 # ensure the preferred locale
30 os.environ['LC_ALL'] = 'C'
31 
32 # Needed for the XML wrapper
33 try:
34  import xml.etree.cElementTree as ET
35 except ImportError:
36  import xml.etree.ElementTree as ET
37 
38 # replacement for timedelta.total_seconds(), which is not available in Python 2.6
39 def total_seconds_replacement(timedelta) :
40  return timedelta.days*86400 + timedelta.seconds + timedelta.microseconds/1e6
41 
42 
43 import qm
44 from qm.test.classes.command import ExecTestBase
45 from qm.test.result_stream import ResultStream
46 
47 ### Needed by the re-implementation of TimeoutExecutable
48 import qm.executable
49 import signal
50 # The classes in this module are implemented differently depending on
51 # the operating system in use.
52 if sys.platform == "win32":
53  import msvcrt
54  import pywintypes
55  from threading import *
56  import win32api
57  import win32con
58  import win32event
59  import win32file
60  import win32pipe
61  import win32process
62 else:
63  import cPickle
64  import fcntl
65  import select
66  import qm.sigmask
67 
68 ########################################################################
69 # Utility Classes
70 ########################################################################
71 class TemporaryEnvironment:
72  """
73  Class to change the environment temporarily.
74  """
75  def __init__(self, orig = os.environ, keep_same = False):
76  """
77  Create a temporary environment on top of the one specified
78  (it can be another TemporaryEnvironment instance).
79  """
80  #print "New environment"
81  self.old_values = {}
82  self.env = orig
83  self._keep_same = keep_same
84 
85  def __setitem__(self,key,value):
86  """
87  Set an environment variable recording the previous value.
88  """
89  if key not in self.old_values :
90  if key in self.env :
91  if not self._keep_same or self.env[key] != value:
92  self.old_values[key] = self.env[key]
93  else:
94  self.old_values[key] = None
95  self.env[key] = value
96 
97  def __getitem__(self,key):
98  """
99  Get an environment variable.
100  Needed to provide the same interface as os.environ.
101  """
102  return self.env[key]
103 
104  def __delitem__(self,key):
105  """
106  Unset an environment variable.
107  Needed to provide the same interface as os.environ.
108  """
109  if key not in self.env :
110  raise KeyError(key)
111  self.old_values[key] = self.env[key]
112  del self.env[key]
113 
114  def keys(self):
115  """
116  Return the list of defined environment variables.
117  Needed to provide the same interface as os.environ.
118  """
119  return self.env.keys()
120 
121  def items(self):
122  """
123  Return the list of (name,value) pairs for the defined environment variables.
124  Needed to provide the same interface as os.environ.
125  """
126  return self.env.items()
127 
128  def __contains__(self,key):
129  """
130  Operator 'in'.
131  Needed to provide the same interface as os.environ.
132  """
133  return key in self.env
134 
135  def restore(self):
136  """
137  Revert all the changes done to the original environment.
138  """
139  for key,value in self.old_values.items():
140  if value is None:
141  del self.env[key]
142  else:
143  self.env[key] = value
144  self.old_values = {}
145 
146  def __del__(self):
147  """
148  Revert the changes on destruction.
149  """
150  #print "Restoring the environment"
151  self.restore()
152 
153  def gen_script(self,shell_type):
154  """
155  Generate a shell script to reproduce the changes in the environment.
156  """
157  shells = [ 'csh', 'sh', 'bat' ]
158  if shell_type not in shells:
159  raise RuntimeError("Shell type '%s' unknown. Available: %s"%(shell_type,shells))
160  out = ""
161  for key,value in self.old_values.items():
162  if key not in self.env:
163  # unset variable
164  if shell_type == 'csh':
165  out += 'unsetenv %s\n'%key
166  elif shell_type == 'sh':
167  out += 'unset %s\n'%key
168  elif shell_type == 'bat':
169  out += 'set %s=\n'%key
170  else:
171  # set variable
172  if shell_type == 'csh':
173  out += 'setenv %s "%s"\n'%(key,self.env[key])
174  elif shell_type == 'sh':
175  out += 'export %s="%s"\n'%(key,self.env[key])
176  elif shell_type == 'bat':
177  out += 'set %s=%s\n'%(key,self.env[key])
178  return out
179 
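# Illustrative usage sketch (editor's note, not part of the original module): how
# TemporaryEnvironment is typically used; the variable name below is an arbitrary
# example.
#
#   >>> env = TemporaryEnvironment()
#   >>> env['MY_TEST_VAR'] = 'some value'   # previous value (if any) is recorded
#   >>> script = env.gen_script('sh')       # e.g. 'export MY_TEST_VAR="some value"\n'
#   >>> env.restore()                       # os.environ is back to its original state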
180 class TempDir:
181  """Small class for temporary directories.
182  When instantiated, it creates a temporary directory and the instance
183  behaves as the string containing the directory name.
184  When the instance goes out of scope, it removes all the content of
185  the temporary directory (automatic clean-up).
186  """
187  def __init__(self, keep = False, chdir = False):
188  self.name = tempfile.mkdtemp()
189  self._keep = keep
190  self._origdir = None
191  if chdir:
192  self._origdir = os.getcwd()
193  os.chdir(self.name)
194 
195  def __str__(self):
196  return self.name
197 
198  def __del__(self):
199  if self._origdir:
200  os.chdir(self._origdir)
201  if self.name and not self._keep:
202  shutil.rmtree(self.name)
203 
204  def __getattr__(self,attr):
205  return getattr(self.name,attr)
206 
207 class TempFile:
208  """Small class for temporary files.
209  When instantiated, it creates a temporary file and the instance
210  behaves as the string containing the file name.
211  When the instance goes out of scope, it closes and removes the
212  temporary file (automatic clean-up).
213  """
214  def __init__(self, suffix='', prefix='tmp', dir=None, text=False, keep = False):
215  self.file = None
216  self.name = None
217  self._keep = keep
218 
219  self._fd, self.name = tempfile.mkstemp(suffix,prefix,dir,text)
220  self.file = os.fdopen(self._fd,"r+")
221 
222  def __str__(self):
223  return self.name
224 
225  def __del__(self):
226  if self.file:
227  self.file.close()
228  if self.name and not self._keep:
229  os.remove(self.name)
230 
231  def __getattr__(self,attr):
232  return getattr(self.file,attr)
233 
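# Illustrative usage sketch (editor's note, not part of the original module):
# a TempFile forwards file methods to the underlying file object and cleans up
# when it is deleted; the option line written below is an arbitrary example.
#
#   >>> tmp = TempFile(suffix='.opts')
#   >>> tmp.write('MessageSvc.OutputLevel = 3;\n')
#   >>> tmp.flush()
#   >>> print str(tmp)      # full path of the temporary file
#   >>> del tmp             # the file is closed and removed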
234 class CMT:
235  """Small wrapper to call CMT.
236  """
237  def __init__(self,path=None):
238  if path is None:
239  path = os.getcwd()
240  self.path = path
241 
242  def _run_cmt(self,command,args):
243  # prepare command line
244  if type(args) is str:
245  args = [args]
246  cmd = "cmt %s"%command
247  for arg in args:
248  cmd += ' "%s"'%arg
249 
250  # go to the execution directory
251  olddir = os.getcwd()
252  os.chdir(self.path)
253  # run cmt
254  result = os.popen4(cmd)[1].read()
255  # return to the old directory
256  os.chdir(olddir)
257  return result
258 
259  def __getattr__(self,attr):
260  return lambda args=[]: self._run_cmt(attr, args)
261 
262  def runtime_env(self,env = None):
263  """Returns a dictionary containing the runtime environment produced by CMT.
264  If a dictionary is passed, a modified instance of it is returned.
265  """
266  if env is None:
267  env = {}
268  for l in self.setup("-csh").splitlines():
269  l = l.strip()
270  if l.startswith("setenv"):
271  dummy,name,value = l.split(None,3)
272  env[name] = value.strip('"')
273  elif l.startswith("unsetenv"):
274  dummy,name = l.split(None,2)
275  if name in env:
276  del env[name]
277  return env
278  def show_macro(self,k):
279  r = self.show(["macro",k])
280  if r.find("CMT> Error: symbol not found") >= 0:
281  return None
282  else:
283  return self.show(["macro_value",k]).strip()
284 
285 
286 ## Locates an executable in the executables path ($PATH) and returns the full
287 # path to it.
288 # If the executable cannot be found, None is returned
289 def which(executable):
290  """
291  Locates an executable in the executables path ($PATH) and returns the full
292  path to it. An application is looked for with or without the '.exe' suffix.
293  If the executable cannot be found, None is returned
294  """
295  if os.path.isabs(executable):
296  if not os.path.exists(executable):
297  if executable.endswith('.exe'):
298  if os.path.exists(executable[:-4]):
299  return executable[:-4]
300  return executable
301  for d in os.environ.get("PATH").split(os.pathsep):
302  fullpath = os.path.join(d, executable)
303  if os.path.exists(fullpath):
304  return fullpath
305  if executable.endswith('.exe'):
306  return which(executable[:-4])
307  return None
308 
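# Illustrative usage sketch (editor's note, not part of the original module):
# 'which' searches $PATH and transparently handles the optional '.exe' suffix.
#
#   >>> which('ls')                        # e.g. '/bin/ls' on Linux
#   >>> which('Gaudi.exe')                 # falls back to 'Gaudi' if the '.exe' is missing
#   >>> which('no-such-program') is None
#   True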
309 def rationalizepath(p):
310  np = os.path.normpath(os.path.expandvars(p))
311  if os.path.exists(np):
312  p = os.path.realpath(np)
313  return p
314 
315 # XML Escaping character
316 import re
317 
318 # xml 1.0 valid characters:
319 # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
320 # so to invert that, not in Char ::
321 # x0 - x8 | xB | xC | xE - x1F
322 # (most control characters, though TAB, CR, LF allowed)
323 # | #xD800 - #xDFFF
324 # (unicode surrogate characters)
325 # | #xFFFE | #xFFFF |
326 # (unicode end-of-plane non-characters)
327 # >= 110000
328 # that would be beyond unicode!!!
329 _illegal_xml_chars_RE = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
330 
331 def hexreplace( match ):
332  "Return the hex string "
333  return "".join(map(hexConvert,match.group()))
334 
335 def hexConvert(char):
336  return hex(ord(char))
337 def convert_xml_illegal_chars(val):
338  return _illegal_xml_chars_RE.sub(hexreplace, val)
339 
340 def escape_xml_illegal_chars(val, replacement='?'):
341  """Filter out characters that are illegal in XML.
342  Looks for any character in val that is not allowed in XML
343  and replaces it with replacement ('?' by default).
344 
345  """
346  return _illegal_xml_chars_RE.sub(replacement, val)
347 
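# Illustrative usage sketch (editor's note, not part of the original module):
# characters not allowed in XML 1.0 are replaced before results are serialized.
#
#   >>> escape_xml_illegal_chars('ok \x00 bad')
#   'ok ? bad'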
348 ########################################################################
349 # Output Validation Classes
350 ########################################################################
351 class BasicOutputValidator:
352  """Basic implementation of an option validator for Gaudi tests.
353  This implementation is based on the standard (LCG) validation functions
354  used in QMTest.
355  """
356  def __init__(self,ref,cause,result_key):
357  self.reference = ref
358  self.cause = cause
359  self.result_key = result_key
360 
361  def __call__(self, out, result):
362  """Validate the output of the program.
363 
364  'stdout' -- A string containing the data written to the standard output
365  stream.
366 
367  'stderr' -- A string containing the data written to the standard error
368  stream.
369 
370  'result' -- A 'Result' object. It may be used to annotate
371  the outcome according to the content of stderr.
372 
373  returns -- A list of strings giving causes of failure."""
374 
375  causes = []
376  # Check to see if the output matches.
377  if not self.__CompareText(out, self.reference):
378  causes.append(self.cause)
379  result[self.result_key] = result.Quote(self.reference)
380 
381  return causes
382 
383  def __CompareText(self, s1, s2):
384  """Compare 's1' and 's2', ignoring line endings.
385 
386  's1' -- A string.
387 
388  's2' -- A string.
389 
390  returns -- True if 's1' and 's2' are the same, ignoring
391  differences in line endings."""
392 
393  # The "splitlines" method works independently of the line ending
394  # convention in use.
395  if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
396  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
397  to_ignore = re.compile(r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
398  keep_line = lambda l: not to_ignore.match(l)
399  return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
400  else:
401  return s1.splitlines() == s2.splitlines()
402 
403 class FilePreprocessor:
404  """ Base class for a callable that takes a file and returns a modified
405  version of it."""
406  def __processLine__(self, line):
407  return line
408  def __call__(self, input):
409  if hasattr(input,"__iter__"):
410  lines = input
411  mergeback = False
412  else:
413  lines = input.splitlines()
414  mergeback = True
415  output = []
416  for l in lines:
417  l = self.__processLine__(l)
418  if l: output.append(l)
419  if mergeback: output = '\n'.join(output)
420  return output
421  def __add__(self, rhs):
422  return FilePreprocessorSequence([self,rhs])
423 
424 class FilePreprocessorSequence(FilePreprocessor):
425  def __init__(self, members = []):
426  self.members = members
427  def __add__(self, rhs):
428  return FilePreprocessorSequence(self.members + [rhs])
429  def __call__(self, input):
430  output = input
431  for pp in self.members:
432  output = pp(output)
433  return output
434 
435 class LineSkipper(FilePreprocessor):
436  def __init__(self, strings = [], regexps = []):
437  import re
438  self.strings = strings
439  self.regexps = map(re.compile,regexps)
440 
441  def __processLine__(self, line):
442  for s in self.strings:
443  if line.find(s) >= 0: return None
444  for r in self.regexps:
445  if r.search(line): return None
446  return line
447 
448 class BlockSkipper(FilePreprocessor):
449  def __init__(self, start, end):
450  self.start = start
451  self.end = end
452  self._skipping = False
453 
454  def __processLine__(self, line):
455  if self.start in line:
456  self._skipping = True
457  return None
458  elif self.end in line:
459  self._skipping = False
460  elif self._skipping:
461  return None
462  return line
463 
464 class RegexpReplacer(FilePreprocessor):
465  def __init__(self, orig, repl = "", when = None):
466  if when:
467  when = re.compile(when)
468  self._operations = [ (when, re.compile(orig), repl) ]
469  def __add__(self,rhs):
470  if isinstance(rhs, RegexpReplacer):
471  res = RegexpReplacer("","",None)
472  res._operations = self._operations + rhs._operations
473  else:
474  res = FilePreprocessor.__add__(self, rhs)
475  return res
476  def __processLine__(self, line):
477  for w,o,r in self._operations:
478  if w is None or w.search(line):
479  line = o.sub(r, line)
480  return line
481 
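# Illustrative usage sketch (editor's note, not part of the original module):
# preprocessors are callables that can be chained with '+'; the combined object
# applies each step in turn to a string or to a list of lines. The log lines
# below are arbitrary examples.
#
#   >>> pp = LineSkipper(strings=['DEBUG']) + RegexpReplacer(r'0x[0-9a-fA-F]+', '0x####')
#   >>> pp('INFO ptr=0xdeadbeef\nDEBUG noise')
#   'INFO ptr=0x####'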
482 # Common preprocessors
483 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
484 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9] *(CES?T)?",
485  "00:00:00 1970-01-01")
486 normalizeEOL = FilePreprocessor()
487 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
488 
489 skipEmptyLines = FilePreprocessor()
490 # FIXME: that's ugly
491 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
492 
493 ## Special preprocessor sorting the list of strings (whitespace separated)
494 # that follow a signature on a single line
495 class LineSorter(FilePreprocessor):
496  def __init__(self, signature):
497  self.signature = signature
498  self.siglen = len(signature)
499  def __processLine__(self, line):
500  pos = line.find(self.signature)
501  if pos >=0:
502  line = line[:(pos+self.siglen)]
503  lst = line[(pos+self.siglen):].split()
504  lst.sort()
505  line += " ".join(lst)
506  return line
507 
508 # Preprocessors for GaudiExamples
509 normalizeExamples = maskPointers + normalizeDate
510 for w,o,r in [
511  #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
512  ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
513  ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
514  ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
515  ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
516  ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
517  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
518  (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
519  # Absorb a change in ServiceLocatorHelper
520  ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
521  # Remove the leading 0 in Windows' exponential format
522  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
523  # Output line changed in Gaudi v24
524  (None, r'Service reference count check:', r'Looping over all active services...'),
525  # Change of property name in Algorithm (GAUDI-1030)
526  (None, r"Property(.*)'ErrorCount':", r"Property\1'ErrorCounter':"),
527  ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
528  normalizeExamples += RegexpReplacer(o,r,w)
529 
530 lineSkipper = LineSkipper(["//GP:",
531  "JobOptionsSvc INFO # ",
532  "JobOptionsSvc WARNING # ",
533  "Time User",
534  "Welcome to",
535  "This machine has a speed",
536  "TIME:",
537  "running on",
538  "ToolSvc.Sequenc... INFO",
539  "DataListenerSvc INFO XML written to file:",
540  "[INFO]","[WARNING]",
541  "DEBUG No writable file catalog found which contains FID:",
542  "0 local", # hack for ErrorLogExample
543  "DEBUG Service base class initialized successfully", # changed between v20 and v21
544  "DEBUG Incident timing:", # introduced with patch #3487
545  "INFO 'CnvServices':[", # changed the level of the message from INFO to DEBUG
546  # The signal handler complains about SIGXCPU not defined on some platforms
547  'SIGXCPU',
548  ],regexps = [
549  r"^JobOptionsSvc INFO *$",
550  r"^#", # Ignore python comments
551  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
552  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
553  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
554  r"File '.*.xml' does not exist",
555  r"INFO Refer to dataset .* by its file ID:",
556  r"INFO Referring to dataset .* by its file ID:",
557  r"INFO Disconnect from dataset",
558  r"INFO Disconnected from dataset",
559  r"INFO Disconnected data IO:",
560  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
561  # I want to ignore the header of the unchecked StatusCode report
562  r"^StatusCodeSvc.*listing all unchecked return codes:",
563  r"^StatusCodeSvc\s*INFO\s*$",
564  r"Num\s*\|\s*Function\s*\|\s*Source Library",
565  r"^[-+]*\s*$",
566  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
567  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
568  # Hide unchecked StatusCodes from dictionaries
569  r"^ +[0-9]+ \|.*ROOT",
570  r"^ +[0-9]+ \|.*\|.*Dict",
571  # Hide success StatusCodeSvc message
572  r"StatusCodeSvc.*all StatusCode instances where checked",
573  # Remove ROOT TTree summary table, which changes from one version to the other
574  r"^\*.*\*$",
575  # Remove Histos Summaries
576  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
577  r"^ \|",
578  r"^ ID=",
579  ] )
580 if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
581  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can fix them
582  lineSkipper += LineSkipper(regexps = [
583  r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
584  ])
585 
586 normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
587  normalizeEOL + LineSorter("Services to release : "))
588 
589 class ReferenceFileValidator:
590  def __init__(self, reffile, cause, result_key, preproc = normalizeExamples):
591  self.reffile = os.path.expandvars(reffile)
592  self.cause = cause
593  self.result_key = result_key
594  self.preproc = preproc
595  def __call__(self, stdout, result):
596  causes = []
597  if os.path.isfile(self.reffile):
598  orig = open(self.reffile).xreadlines()
599  if self.preproc:
600  orig = self.preproc(orig)
601  else:
602  orig = []
603 
604  new = stdout.splitlines()
605  if self.preproc:
606  new = self.preproc(new)
607  #open(self.reffile + ".test","w").writelines(new)
608  diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
609  filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
610  #filterdiffs = [x.strip() for x in diffs]
611  if filterdiffs:
612  result[self.result_key] = result.Quote("\n".join(filterdiffs))
613  result[self.result_key] += result.Quote("""
614 Legend:
615  -) reference file
616  +) standard output of the test""")
617  causes.append(self.cause)
618 
619  return causes
620 
621 ########################################################################
622 # Useful validation functions
623 ########################################################################
624 def findReferenceBlock(reference, stdout, result, causes, signature_offset=0, signature=None,
625  id = None):
626  """
627  Given a block of text, tries to find it in the output.
628  The block has to be identified by a signature line. By default, the first
629  line is used as signature, or the line pointed to by signature_offset. If
630  signature_offset points outside the block, a signature line can be passed as
631  signature argument. Note: if 'signature' is None (the default), a negative
632  signature_offset is interpreted as an index in a list (e.g. -1 means the last
633  line), otherwise it is interpreted as the number of lines before the
634  first one of the block at which the signature must appear.
635  The parameter 'id' allows distinguishing between different calls to this
636  function in the same validation code.
637  """
638  # split reference file, sanitize EOLs and remove empty lines
639  reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
640  if not reflines:
641  raise RuntimeError("Empty (or null) reference")
642  # the same on standard output
643  outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
644 
645  res_field = "GaudiTest.RefBlock"
646  if id:
647  res_field += "_%s" % id
648 
649  if signature is None:
650  if signature_offset < 0:
651  signature_offset = len(reflines)+signature_offset
652  signature = reflines[signature_offset]
653  # find the reference block in the output file
654  try:
655  pos = outlines.index(signature)
656  outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
657  if reflines != outlines:
658  msg = "standard output"
659  # I do not want 2 messages in causes if the function is called twice
660  if not msg in causes:
661  causes.append(msg)
662  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
663  except ValueError:
664  causes.append("missing signature")
665  result[res_field + ".signature"] = result.Quote(signature)
666  if len(reflines) > 1 or signature != reflines[0]:
667  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
668 
669  return causes
670 
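# Illustrative usage sketch (editor's note, not part of the original module): in
# the "validator" field of a test (see GaudiExeTest below) this function is
# exposed with stdout/result/causes already bound by CallWrapper, so a validator
# script can call it with just the reference block. The block text and the 'id'
# value below are arbitrary examples.
#
#   findReferenceBlock("""
#       ApplicationMgr       INFO Application Manager Configured successfully
#   """, id='config')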
671 def countErrorLines(expected = {'ERROR':0, 'FATAL':0}, **kwargs):
672  """
673  Count the number of messages with required severity (by default ERROR and FATAL)
674  and check if their numbers match the expected ones (0 by default).
675  The dictionary "expected" can be used to tune the number of errors and fatals
676  allowed, or to limit the number of expected warnings etc.
677  """
678  stdout = kwargs["stdout"]
679  result = kwargs["result"]
680  causes = kwargs["causes"]
681 
682  # prepare the dictionary to record the extracted lines
683  errors = {}
684  for sev in expected:
685  errors[sev] = []
686 
687  outlines = stdout.splitlines()
688  from math import log10
689  fmt = "%%%dd - %%s" % (int(log10(len(outlines))+1))
690 
691  linecount = 0
692  for l in outlines:
693  linecount += 1
694  words = l.split()
695  if len(words) >= 2 and words[1] in errors:
696  errors[words[1]].append(fmt%(linecount,l.rstrip()))
697 
698  for e in errors:
699  if len(errors[e]) != expected[e]:
700  causes.append('%s(%d)'%(e,len(errors[e])))
701  result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
702  result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
703 
704  return causes
705 
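# Illustrative usage sketch (editor's note, not part of the original module): in
# a "validator" script stdout/result/causes are already bound by CallWrapper, so
# a typical call only tunes the expected counts (the values below are arbitrary
# examples).
#
#   countErrorLines(expected={'ERROR': 0, 'FATAL': 0, 'WARNING': 2})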
706 
707 def _parseTTreeSummary(lines, pos):
708  """
709  Parse the TTree summary table in lines, starting from pos.
710  Returns a tuple with the dictionary of the digested information and the
711  position of the first line after the summary.
712  """
713  result = {}
714  i = pos + 1 # first line is a sequence of '*'
715  count = len(lines)
716 
717  splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
718  def parseblock(ll):
719  r = {}
720  cols = splitcols(ll[0])
721  r["Name"], r["Title"] = cols[1:]
722 
723  cols = splitcols(ll[1])
724  r["Entries"] = int(cols[1])
725 
726  sizes = cols[2].split()
727  r["Total size"] = int(sizes[2])
728  if sizes[-1] == "memory":
729  r["File size"] = 0
730  else:
731  r["File size"] = int(sizes[-1])
732 
733  cols = splitcols(ll[2])
734  sizes = cols[2].split()
735  if cols[0] == "Baskets":
736  r["Baskets"] = int(cols[1])
737  r["Basket size"] = int(sizes[2])
738  r["Compression"] = float(sizes[-1])
739  return r
740 
741  if i < (count - 3) and lines[i].startswith("*Tree"):
742  result = parseblock(lines[i:i+3])
743  result["Branches"] = {}
744  i += 4
745  while i < (count - 3) and lines[i].startswith("*Br"):
746  if i < (count - 2) and lines[i].startswith("*Branch "):
747  # skip branch header
748  i += 3
749  continue
750  branch = parseblock(lines[i:i+3])
751  result["Branches"][branch["Name"]] = branch
752  i += 4
753 
754  return (result, i)
755 
756 def findTTreeSummaries(stdout):
757  """
758  Scan stdout to find ROOT TTree summaries and digest them.
759  """
760  stars = re.compile(r"^\*+$")
761  outlines = stdout.splitlines()
762  nlines = len(outlines)
763  trees = {}
764 
765  i = 0
766  while i < nlines: #loop over the output
767  # look for the beginning of a summary table (a line made only of '*')
768  while i < nlines and not stars.match(outlines[i]):
769  i += 1
770  if i < nlines:
771  tree, i = _parseTTreeSummary(outlines, i)
772  if tree:
773  trees[tree["Name"]] = tree
774 
775  return trees
776 
777 def cmpTreesDicts(reference, to_check, ignore = None):
778  """
779  Check that all the keys in reference are in to_check too, with the same value.
780  If the value is a dict, the function is called recursively. to_check can
781  contain more keys than reference, that will not be tested.
782  The function returns at the first difference found.
783  """
784  fail_keys = []
785  # filter the keys in the reference dictionary
786  if ignore:
787  ignore_re = re.compile(ignore)
788  keys = [ key for key in reference if not ignore_re.match(key) ]
789  else:
790  keys = reference.keys()
791  # loop over the keys (not ignored) in the reference dictionary
792  for k in keys:
793  if k in to_check: # the key must be in the dictionary to_check
794  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
795  # if both reference and to_check values are dictionaries, recurse
796  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
797  else:
798  # compare the two values
799  failed = to_check[k] != reference[k]
800  else: # handle missing keys in the dictionary to check (i.e. failure)
801  to_check[k] = None
802  failed = True
803  if failed:
804  fail_keys.insert(0, k)
805  break # exit from the loop at the first failure
806  return fail_keys # return the list of keys bringing to the different values
807 
808 def getCmpFailingValues(reference, to_check, fail_path):
809  c = to_check
810  r = reference
811  for k in fail_path:
812  c = c.get(k,None)
813  r = r.get(k,None)
814  if c is None or r is None:
815  break # one of the dictionaries is not deep enough
816  return (fail_path, r, c)
817 
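# Illustrative usage sketch (editor's note, not part of the original module): how
# the TTree comparison helpers fit together; 'reference_text' and 'stdout' are
# placeholders for the reference file content and the test output.
#
#   expected = findTTreeSummaries(reference_text)
#   found = findTTreeSummaries(stdout)
#   fail_path = cmpTreesDicts(expected, found, ignore=r"Basket|.*size|Compression")
#   if fail_path:
#       print "difference at %s: %s != %s" % getCmpFailingValues(expected, found, fail_path)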
818 # signature of the print-out of the histograms
819 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
820 
821 def parseHistosSummary(lines, pos):
822  """
823  Extract the histograms infos from the lines starting at pos.
824  Returns the position of the first line after the summary block.
825  """
826  global h_count_re
827  h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
828  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
829 
830  nlines = len(lines)
831 
832  # decode header
833  m = h_count_re.search(lines[pos])
834  name = m.group(1).strip()
835  total = int(m.group(2))
836  header = {}
837  for k, v in [ x.split("=") for x in m.group(3).split() ]:
838  header[k] = int(v)
839  pos += 1
840  header["Total"] = total
841 
842  summ = {}
843  while pos < nlines:
844  m = h_table_head.search(lines[pos])
845  if m:
846  t, d = m.groups(1) # type and directory
847  t = t.replace(" profile", "Prof")
848  pos += 1
849  if pos < nlines:
850  l = lines[pos]
851  else:
852  l = ""
853  cont = {}
854  if l.startswith(" | ID"):
855  # table format
856  titles = [ x.strip() for x in l.split("|")][1:]
857  pos += 1
858  while pos < nlines and lines[pos].startswith(" |"):
859  l = lines[pos]
860  values = [ x.strip() for x in l.split("|")][1:]
861  hcont = {}
862  for i in range(len(titles)):
863  hcont[titles[i]] = values[i]
864  cont[hcont["ID"]] = hcont
865  pos += 1
866  elif l.startswith(" ID="):
867  while pos < nlines and lines[pos].startswith(" ID="):
868  values = [ x.strip() for x in h_short_summ.search(lines[pos]).groups() ]
869  cont[values[0]] = values
870  pos += 1
871  else: # not interpreted
872  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
873  if not d in summ:
874  summ[d] = {}
875  summ[d][t] = cont
876  summ[d]["header"] = header
877  else:
878  break
879  if not summ:
880  # If the full table is not present, we use only the header
881  summ[name] = {"header": header}
882  return summ, pos
883 
884 def findHistosSummaries(stdout):
885  """
886  Scan stdout to find histogram summaries and digest them.
887  """
888  outlines = stdout.splitlines()
889  nlines = len(outlines) - 1
890  summaries = {}
891  global h_count_re
892 
893  pos = 0
894  while pos < nlines:
895  summ = {}
896  # find first line of block:
897  match = h_count_re.search(outlines[pos])
898  while pos < nlines and not match:
899  pos += 1
900  match = h_count_re.search(outlines[pos])
901  if match:
902  summ, pos = parseHistosSummary(outlines, pos)
903  summaries.update(summ)
904  return summaries
905 
906 class GaudiFilterExecutable(qm.executable.Filter):
907  def __init__(self, input, timeout = -1):
908  """Create a new 'Filter'.
909 
910  'input' -- The string containing the input to provide to the
911  child process.
912 
913  'timeout' -- As for 'TimeoutExecutable.__init__'."""
914 
915  super(GaudiFilterExecutable, self).__init__(input, timeout)
916  self.__input = input
917  self.__timeout = timeout
918  self.stack_trace_file = None
919  # Temporary file to pass the stack trace from one process to the other
920  # The file must be closed and reopened when needed to avoid conflicts
921  # between the processes
922  tmpf = tempfile.mkstemp()
923  os.close(tmpf[0])
924  self.stack_trace_file = tmpf[1] # remember only the name
925 
926  def __UseSeparateProcessGroupForChild(self):
927  """Copied from TimeoutExecutable to allow the re-implementation of
928  _HandleChild.
929  """
930  if sys.platform == "win32":
931  # In Windows 2000 (or later), we should use "jobs" by
932  # analogy with UNIX process groups. However, that
933  # functionality is not (yet) provided by the Python Win32
934  # extensions.
935  return 0
936 
937  return self.__timeout >= 0 or self.__timeout == -2
938  ##
939  # Needs to replace the ones from RedirectedExecutable and TimeoutExecutable
940  def _HandleChild(self):
941  """Code copied from both FilterExecutable and TimeoutExecutable.
942  """
943  # Close the pipe ends that we do not need.
944  if self._stdin_pipe:
945  self._ClosePipeEnd(self._stdin_pipe[0])
946  if self._stdout_pipe:
947  self._ClosePipeEnd(self._stdout_pipe[1])
948  if self._stderr_pipe:
949  self._ClosePipeEnd(self._stderr_pipe[1])
950 
951  # The pipes created by 'RedirectedExecutable' must be closed
952  # before the monitor process (created by 'TimeoutExecutable')
953  # is created. Otherwise, if the child process dies, 'select'
954  # in the parent will not return if the monitor process still
955  # has one of the file descriptors open.
956 
957  super(qm.executable.TimeoutExecutable, self)._HandleChild()
958 
959  if self.__UseSeparateProcessGroupForChild():
960  # Put the child into its own process group. This step is
961  # performed in both the parent and the child; therefore both
962  # processes can safely assume that the creation of the process
963  # group has taken place.
964  child_pid = self._GetChildPID()
965  try:
966  os.setpgid(child_pid, child_pid)
967  except:
968  # The call to setpgid may fail if the child has exited,
969  # or has already called 'exec'. In that case, we are
970  # guaranteed that the child has already put itself in the
971  # desired process group.
972  pass
973  # Create the monitoring process.
974  #
975  # If the monitoring process is in parent's process group and
976  # kills the child after waitpid has returned in the parent, we
977  # may end up trying to kill a process group other than the one
978  # that we intend to kill. Therefore, we put the monitoring
979  # process in the same process group as the child; that ensures
980  # that the process group will persist until the monitoring
981  # process kills it.
982  self.__monitor_pid = os.fork()
983  if self.__monitor_pid != 0:
984  # Make sure that the monitoring process is placed into the
985  # child's process group before the parent process calls
986  # 'waitpid'. In this way, we are guaranteed that the monitoring
987  # process is in the same process group as the child.
988  os.setpgid(self.__monitor_pid, child_pid)
989  else:
990  # Put the monitoring process into the child's process
991  # group. We know the process group still exists at
992  # this point because either (a) we are in the process
993  # group, or (b) the parent has not yet called waitpid.
994  os.setpgid(0, child_pid)
995 
996  # Close all open file descriptors. They are not needed
997  # in the monitor process. Furthermore, when the parent
998  # closes the write end of the stdin pipe to the child,
999  # we do not want the pipe to remain open; leaving the
1000  # pipe open in the monitor process might cause the child
1001  # to block waiting for additional input.
1002  try:
1003  max_fds = os.sysconf("SC_OPEN_MAX")
1004  except:
1005  max_fds = 256
1006  for fd in xrange(max_fds):
1007  try:
1008  os.close(fd)
1009  except:
1010  pass
1011  try:
1012  if self.__timeout >= 0:
1013  # Give the child time to run.
1014  time.sleep (self.__timeout)
1015  #######################################################
1016  ### This is the interesting part: dump the stack trace to a file
1017  if sys.platform == "linux2": # we should be have /proc and gdb
1018  cmd = ["gdb",
1019  os.path.join("/proc", str(child_pid), "exe"),
1020  str(child_pid),
1021  "-batch", "-n", "-x",
1022  "'%s'" % os.path.join(os.path.dirname(__file__), "stack-trace.gdb")]
1023  # FIXME: I wanted to use subprocess.Popen, but it doesn't want to work
1024  # in this context.
1025  o = os.popen(" ".join(cmd)).read()
1026  open(self.stack_trace_file,"w").write(o)
1027  #######################################################
1028 
1029  # Kill all processes in the child process group.
1030  os.kill(0, signal.SIGKILL)
1031  else:
1032  # This call to select will never terminate.
1033  select.select ([], [], [])
1034  finally:
1035  # Exit. This code is in a finally clause so that
1036  # we are guaranteed to get here no matter what.
1037  os._exit(0)
1038  elif self.__timeout >= 0 and sys.platform == "win32":
1039  # Create a monitoring thread.
1040  self.__monitor_thread = Thread(target = self.__Monitor)
1041  self.__monitor_thread.start()
1042 
1043  if sys.platform == "win32":
1044 
1045  def __Monitor(self):
1046  """Code copied from FilterExecutable.
1047  Kill the child if the timeout expires.
1048 
1049  This function is run in the monitoring thread."""
1050 
1051  # The timeout may be expressed as a floating-point value
1052  # on UNIX, but it must be an integer number of
1053  # milliseconds when passed to WaitForSingleObject.
1054  timeout = int(self.__timeout * 1000)
1055  # Wait for the child process to terminate or for the
1056  # timer to expire.
1057  result = win32event.WaitForSingleObject(self._GetChildPID(),
1058  timeout)
1059  # If the timeout occurred, kill the child process.
1060  if result == win32con.WAIT_TIMEOUT:
1061  self.Kill()
1062 
1063 ########################################################################
1064 # Test Classes
1065 ########################################################################
1066 class GaudiExeTest(ExecTestBase):
1067  """Standard Gaudi test.
1068  """
1069  arguments = [
1070  qm.fields.TextField(
1071  name="program",
1072  title="Program",
1073  not_empty_text=1,
1074  description="""The path to the program.
1075 
1076  This field indicates the path to the program. If it is not
1077  an absolute path, the value of the 'PATH' environment
1078  variable will be used to search for the program.
1079  If not specified, $GAUDIEXE or Gaudi.exe are used.
1080  """
1081  ),
1082  qm.fields.SetField(qm.fields.TextField(
1083  name="args",
1084  title="Argument List",
1085  description="""The command-line arguments.
1086 
1087  If this field is left blank, the program is run without any
1088  arguments.
1089 
1090  Use this field to specify the option files.
1091 
1092  An implicit 0th argument (the path to the program) is added
1093  automatically."""
1094  )),
1095  qm.fields.TextField(
1096  name="options",
1097  title="Options",
1098  description="""Options to be passed to the application.
1099 
1100  This field allows passing a list of options to the main program
1101  without the need for a separate option file.
1102 
1103  The content of the field is written to a temporary file whose name
1104  is passed to the application as the last argument (appended to the
1105  field "Argument List").
1106  """,
1107  verbatim="true",
1108  multiline="true",
1109  default_value=""
1110  ),
1111  qm.fields.TextField(
1112  name="workdir",
1113  title="Working Directory",
1114  description="""Path to the working directory.
1115 
1116  If this field is left blank, the program will be run from the qmtest
1117  directory, otherwise from the directory specified.""",
1118  default_value=""
1119  ),
1120  qm.fields.TextField(
1121  name="reference",
1122  title="Reference Output",
1123  description="""Path to the file containing the reference output.
1124 
1125  If this field is left blank, any standard output will be considered
1126  valid.
1127 
1128  If the reference file is specified, any output on standard error is
1129  ignored."""
1130  ),
1131  qm.fields.TextField(
1132  name="error_reference",
1133  title="Reference for standard error",
1134  description="""Path to the file containing the reference for the standard error.
1135 
1136  If this field is left blank, the standard error is compared against
1137  the expected standard error of the test (if any).
1138 
1139  If the reference file is specified, the standard error is compared
1140  against its content after preprocessing."""
1141  ),
1142  qm.fields.SetField(qm.fields.TextField(
1143  name = "unsupported_platforms",
1144  title = "Unsupported Platforms",
1145  description = """Platform on which the test must not be run.
1146 
1147  List of regular expressions identifying the platforms on which the
1148  test is not run and the result is set to UNTESTED."""
1149  )),
1150 
1151  qm.fields.TextField(
1152  name = "validator",
1153  title = "Validator",
1154  description = """Function to validate the output of the test.
1155 
1156  If defined, the function is used to validate the products of the
1157  test.
1158  The function is called passing as arguments:
1159  self: the test class instance
1160  stdout: the standard output of the executed test
1161  stderr: the standard error of the executed test
1162  result: the Result objects to fill with messages
1163  The function must return a list of causes for the failure.
1164  If specified, overrides standard output, standard error and
1165  reference files.
1166  """,
1167  verbatim="true",
1168  multiline="true",
1169  default_value=""
1170  ),
1171 
1172  qm.fields.BooleanField(
1173  name = "use_temp_dir",
1174  title = "Use temporary directory",
1175  description = """Use temporary directory.
1176 
1177  If set to true, use a temporary directory as working directory.
1178  """,
1179  default_value="false"
1180  ),
1181 
1182  qm.fields.IntegerField(
1183  name = "signal",
1184  title = "Expected signal",
1185  description = """Expect termination by signal.""",
1186  default_value=None
1187  ),
1188  ]
1189 
1190  def PlatformIsNotSupported(self, context, result):
1191  platform = self.GetPlatform()
1192  unsupported = [ re.compile(x)
1193  for x in [ str(y).strip()
1194  for y in self.unsupported_platforms ]
1195  if x
1196  ]
1197  for p_re in unsupported:
1198  if p_re.search(platform):
1199  result.SetOutcome(result.UNTESTED)
1200  result[result.CAUSE] = 'Platform not supported.'
1201  return True
1202  return False
1203 
1204  def GetPlatform(self):
1205  """
1206  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1207  """
1208  arch = "None"
1209  # check architecture name
1210  if "CMTCONFIG" in os.environ:
1211  arch = os.environ["CMTCONFIG"]
1212  elif "SCRAM_ARCH" in os.environ:
1213  arch = os.environ["SCRAM_ARCH"]
1214  return arch
1215 
1216  def isWinPlatform(self):
1217  """
1218  Return True if the current platform is Windows.
1219 
1220  This function was needed because of the change in the CMTCONFIG format,
1221  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1222  """
1223  platform = self.GetPlatform()
1224  return "winxp" in platform or platform.startswith("win")
1225 
1226  def _expandReferenceFileName(self, reffile):
1227  # if no file is passed, do nothing
1228  if not reffile:
1229  return ""
1230 
1231  # function to split a platform id into its constituent parts
1232  platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
1233 
1234  reference = os.path.normpath(os.path.expandvars(reffile))
1235  # old-style platform-specific reference name
1236  spec_ref = reference[:-3] + self.GetPlatform()[0:3] + reference[-3:]
1237  if os.path.isfile(spec_ref):
1238  reference = spec_ref
1239  else: # look for new-style platform specific reference files:
1240  # get all the files whose name start with the reference filename
1241  dirname, basename = os.path.split(reference)
1242  if not dirname: dirname = '.'
1243  head = basename + "."
1244  head_len = len(head)
1245  platform = platformSplit(self.GetPlatform())
1246  if 'do0' in platform:
1247  platform.add('dbg')
1248  candidates = []
1249  for f in os.listdir(dirname):
1250  if f.startswith(head):
1251  req_plat = platformSplit(f[head_len:])
1252  if platform.issuperset(req_plat):
1253  candidates.append( (len(req_plat), f) )
1254  if candidates: # take the one with highest matching
1255  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
1256  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
1257  candidates.sort()
1258  reference = os.path.join(dirname, candidates[-1][1])
1259  return reference
1260 
1261  def CheckTTreesSummaries(self, stdout, result, causes,
1262  trees_dict = None,
1263  ignore = r"Basket|.*size|Compression"):
1264  """
1265  Compare the TTree summaries in stdout with the ones in trees_dict or in
1266  the reference file. By default ignore the size, compression and basket
1267  fields.
1268  The presence of TTree summaries when none is expected is not a failure.
1269  """
1270  if trees_dict is None:
1271  reference = self._expandReferenceFileName(self.reference)
1272  # call the validator if the file exists
1273  if reference and os.path.isfile(reference):
1274  trees_dict = findTTreeSummaries(open(reference).read())
1275  else:
1276  trees_dict = {}
1277 
1278  from pprint import PrettyPrinter
1279  pp = PrettyPrinter()
1280  if trees_dict:
1281  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
1282  if ignore:
1283  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
1284 
1285  trees = findTTreeSummaries(stdout)
1286  failed = cmpTreesDicts(trees_dict, trees, ignore)
1287  if failed:
1288  causes.append("trees summaries")
1289  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
1290  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
1291  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
1292 
1293  return causes
1294 
1295  def CheckHistosSummaries(self, stdout, result, causes,
1296  dict = None,
1297  ignore = None):
1298  """
1299  Compare the histogram summaries in stdout with the ones in 'dict' or in
1300  the reference file. By default, no field is ignored.
1301  The presence of histogram summaries when none is expected is not a
1302  failure.
1303  """
1304  if dict is None:
1305  reference = self._expandReferenceFileName(self.reference)
1306  # call the validator if the file exists
1307  if reference and os.path.isfile(reference):
1308  dict = findHistosSummaries(open(reference).read())
1309  else:
1310  dict = {}
1311 
1312  from pprint import PrettyPrinter
1313  pp = PrettyPrinter()
1314  if dict:
1315  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
1316  if ignore:
1317  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
1318 
1319  histos = findHistosSummaries(stdout)
1320  failed = cmpTreesDicts(dict, histos, ignore)
1321  if failed:
1322  causes.append("histos summaries")
1323  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
1324  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
1325  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
1326 
1327  return causes
1328 
1329  def ValidateWithReference(self, stdout, stderr, result, causes, preproc = None):
1330  """
1331  Default validation action: compare standard output and error to the
1332  reference files.
1333  """
1334  # set the default output preprocessor
1335  if preproc is None:
1336  preproc = normalizeExamples
1337  # check standard output
1338  reference = self._expandReferenceFileName(self.reference)
1339  # call the validator if the file exists
1340  if reference and os.path.isfile(reference):
1341  result["GaudiTest.output_reference"] = reference
1342  causes += ReferenceFileValidator(reference,
1343  "standard output",
1344  "GaudiTest.output_diff",
1345  preproc = preproc)(stdout, result)
1346 
1347  # Compare TTree summaries
1348  causes = self.CheckTTreesSummaries(stdout, result, causes)
1349  causes = self.CheckHistosSummaries(stdout, result, causes)
1350 
1351  if causes: # Write a new reference file for stdout
1352  try:
1353  newref = open(reference + ".new","w")
1354  # sanitize newlines
1355  for l in stdout.splitlines():
1356  newref.write(l.rstrip() + '\n')
1357  del newref # flush and close
1358  except IOError:
1359  # Ignore IO errors when trying to update reference files
1360  # because we may be in a read-only filesystem
1361  pass
1362 
1363  # check standard error
1364  reference = self._expandReferenceFileName(self.error_reference)
1365  # call the validator if we have a file to use
1366  if reference and os.path.isfile(reference):
1367  result["GaudiTest.error_reference"] = reference
1368  newcauses = ReferenceFileValidator(reference,
1369  "standard error",
1370  "GaudiTest.error_diff",
1371  preproc = preproc)(stderr, result)
1372  causes += newcauses
1373  if newcauses: # Write a new reference file for stderr
1374  newref = open(reference + ".new","w")
1375  # sanitize newlines
1376  for l in stderr.splitlines():
1377  newref.write(l.rstrip() + '\n')
1378  del newref # flush and close
1379  else:
1380  causes += BasicOutputValidator(self.stderr,
1381  "standard error",
1382  "ExecTest.expected_stderr")(stderr, result)
1383 
1384  return causes
1385 
1386  def ValidateOutput(self, stdout, stderr, result):
1387  causes = []
1388  # if the test definition contains a custom validator, use it
1389  if self.validator.strip() != "":
1390  class CallWrapper(object):
1391  """
1392  Small wrapper class to dynamically bind some default arguments
1393  to a callable.
1394  """
1395  def __init__(self, callable, extra_args = {}):
1396  self.callable = callable
1397  self.extra_args = extra_args
1398  # get the list of names of positional arguments
1399  from inspect import getargspec
1400  self.args_order = getargspec(callable)[0]
1401  # Remove "self" from the list of positional arguments
1402  # since it is added automatically
1403  if self.args_order[0] == "self":
1404  del self.args_order[0]
1405  def __call__(self, *args, **kwargs):
1406  # Check which positional arguments are used
1407  positional = self.args_order[:len(args)]
1408 
1409  kwargs = dict(kwargs) # copy the arguments dictionary
1410  for a in self.extra_args:
1411  # use "extra_args" for the arguments not specified as
1412  # positional or keyword
1413  if a not in positional and a not in kwargs:
1414  kwargs[a] = self.extra_args[a]
1415  return apply(self.callable, args, kwargs)
1416  # local names to be exposed in the script
1417  exported_symbols = {"self":self,
1418  "stdout":stdout,
1419  "stderr":stderr,
1420  "result":result,
1421  "causes":causes,
1422  "findReferenceBlock":
1423  CallWrapper(findReferenceBlock, {"stdout":stdout,
1424  "result":result,
1425  "causes":causes}),
1426  "validateWithReference":
1427  CallWrapper(self.ValidateWithReference, {"stdout":stdout,
1428  "stderr":stderr,
1429  "result":result,
1430  "causes":causes}),
1431  "countErrorLines":
1432  CallWrapper(countErrorLines, {"stdout":stdout,
1433  "result":result,
1434  "causes":causes}),
1435  "checkTTreesSummaries":
1436  CallWrapper(self.CheckTTreesSummaries, {"stdout":stdout,
1437  "result":result,
1438  "causes":causes}),
1439  "checkHistosSummaries":
1440  CallWrapper(self.CheckHistosSummaries, {"stdout":stdout,
1441  "result":result,
1442  "causes":causes}),
1443 
1444  }
1445  exec self.validator in globals(), exported_symbols
1446  else:
1447  self.ValidateWithReference(stdout, stderr, result, causes)
1448 
1449  return causes
1450 
1451  def DumpEnvironment(self, result):
1452  """
1453  Add the content of the environment to the result object.
1454 
1455  Copied from the QMTest class of COOL.
1456  """
1457  vars = os.environ.keys()
1458  vars.sort()
1459  result['GaudiTest.environment'] = \
1460  result.Quote('\n'.join(["%s=%s"%(v,os.environ[v]) for v in vars]))
1461 
1462  def Run(self, context, result):
1463  """Run the test.
1464 
1465  'context' -- A 'Context' giving run-time parameters to the
1466  test.
1467 
1468  'result' -- A 'Result' object. The outcome will be
1469  'Result.PASS' when this method is called. The 'result' may be
1470  modified by this method to indicate outcomes other than
1471  'Result.PASS' or to add annotations."""
1472 
1473  # Check if the platform is supported
1474  if self.PlatformIsNotSupported(context, result):
1475  return
1476 
1477  # Prepare program name and arguments (expanding variables, and converting to absolute)
1478  if self.program:
1479  prog = rationalizepath(self.program)
1480  elif "GAUDIEXE" in os.environ:
1481  prog = os.environ["GAUDIEXE"]
1482  else:
1483  prog = "Gaudi.exe"
1484  self.program = prog
1485 
1486  dummy, prog_ext = os.path.splitext(prog)
1487  if prog_ext not in [ ".exe", ".py", ".bat" ] and self.isWinPlatform():
1488  prog += ".exe"
1489  prog_ext = ".exe"
1490 
1491  prog = which(prog) or prog
1492 
1493  # Convert paths to absolute paths in arguments and reference files
1494  args = map(rationalizepath, self.args)
1495  self.reference = rationalizepath(self.reference)
1496  self.error_reference = rationalizepath(self.error_reference)
1497 
1498 
1499  # check if the user provided inline options
1500  tmpfile = None
1501  if self.options.strip():
1502  ext = ".opts"
1503  if re.search(r"from\s+Gaudi.Configuration\s+import\s+\*|from\s+Configurables\s+import", self.options):
1504  ext = ".py"
1505  tmpfile = TempFile(ext)
1506  tmpfile.writelines("\n".join(self.options.splitlines()))
1507  tmpfile.flush()
1508  args.append(tmpfile.name)
1509  result["GaudiTest.options"] = result.Quote(self.options)
1510 
1511  # if the program is a python file, execute it through python
1512  if prog_ext == ".py":
1513  args.insert(0,prog)
1514  if self.isWinPlatform():
1515  prog = which("python.exe") or "python.exe"
1516  else:
1517  prog = which("python") or "python"
1518 
1519  # Change to the working directory if specified or to the default temporary
1520  origdir = os.getcwd()
1521  if self.workdir:
1522  os.chdir(str(os.path.normpath(os.path.expandvars(self.workdir))))
1523  elif self.use_temp_dir == "true":
1524  if "QMTEST_TMPDIR" in os.environ:
1525  qmtest_tmpdir = os.environ["QMTEST_TMPDIR"]
1526  if not os.path.exists(qmtest_tmpdir):
1527  os.makedirs(qmtest_tmpdir)
1528  os.chdir(qmtest_tmpdir)
1529  elif "qmtest.tmpdir" in context:
1530  os.chdir(context["qmtest.tmpdir"])
1531 
1532  if "QMTEST_IGNORE_TIMEOUT" not in os.environ:
1533  self.timeout = max(self.timeout,600)
1534  else:
1535  self.timeout = -1
1536 
1537  try:
1538  # Generate eclipse.org debug launcher for the test
1539  self._CreateEclipseLaunch(prog, args, destdir = os.path.join(origdir, '.eclipse'))
1540  # Run the test
1541  self.RunProgram(prog,
1542  [ prog ] + args,
1543  context, result)
1544  # Record the content of the environment for failing tests
1545  if result.GetOutcome() not in [ result.PASS ]:
1546  self.DumpEnvironment(result)
1547  finally:
1548  # revert to the original directory
1549  os.chdir(origdir)
1550 
1551  def RunProgram(self, program, arguments, context, result):
1552  """Run the 'program'.
1553 
1554  'program' -- The path to the program to run.
1555 
1556  'arguments' -- A list of the arguments to the program. This
1557  list must contain a first argument corresponding to 'argv[0]'.
1558 
1559  'context' -- A 'Context' giving run-time parameters to the
1560  test.
1561 
1562  'result' -- A 'Result' object. The outcome will be
1563  'Result.PASS' when this method is called. The 'result' may be
1564  modified by this method to indicate outcomes other than
1565  'Result.PASS' or to add annotations.
1566 
1567  @attention: This method has been copied from command.ExecTestBase
1568  (QMTest 2.3.0) and modified to keep stdout and stderr
1569  for tests that have been terminated by a signal.
1570  (Fundamental for debugging in the Application Area)
1571  """
1572 
1573  # Construct the environment.
1574  environment = self.MakeEnvironment(context)
1575  # FIXME: without this, we get some spurious '\x1b[?1034' in the stdout on SLC6
1576  if "slc6" in environment.get('CMTCONFIG', ''):
1577  environment['TERM'] = 'dumb'
1578  # Create the executable.
1579  if self.timeout >= 0:
1580  timeout = self.timeout
1581  else:
1582  # If no timeout was specified, we still run this process in a
1583  # separate process group and kill the entire process group
1584  # when the child is done executing. That means that
1585  # orphaned child processes created by the test will be
1586  # cleaned up.
1587  timeout = -2
1588  e = GaudiFilterExecutable(self.stdin, timeout)
1589  # Run it.
1590  exit_status = e.Run(arguments, environment, path = program)
1591  # Get the stack trace from the temporary file (if present)
1592  if e.stack_trace_file and os.path.exists(e.stack_trace_file):
1593  stack_trace = open(e.stack_trace_file).read()
1594  os.remove(e.stack_trace_file)
1595  else:
1596  stack_trace = None
1597  if stack_trace:
1598  result["ExecTest.stack_trace"] = result.Quote(stack_trace)
1599 
1600  # If the process terminated normally, check the outputs.
1601  if (sys.platform == "win32" or os.WIFEXITED(exit_status)
1602  or self.signal == os.WTERMSIG(exit_status)):
1603  # There are no causes of failure yet.
1604  causes = []
1605  # The target program terminated normally. Extract the
1606  # exit code, if this test checks it.
1607  if self.exit_code is None:
1608  exit_code = None
1609  elif sys.platform == "win32":
1610  exit_code = exit_status
1611  else:
1612  exit_code = os.WEXITSTATUS(exit_status)
1613  # Get the output generated by the program.
1614  stdout = e.stdout
1615  stderr = e.stderr
1616  # Record the results.
1617  result["ExecTest.exit_code"] = str(exit_code)
1618  result["ExecTest.stdout"] = result.Quote(stdout)
1619  result["ExecTest.stderr"] = result.Quote(stderr)
1620  # Check to see if the exit code matches.
1621  if exit_code != self.exit_code:
1622  causes.append("exit_code")
1623  result["ExecTest.expected_exit_code"] \
1624  = str(self.exit_code)
1625  # Validate the output.
1626  causes += self.ValidateOutput(stdout, stderr, result)
1627  # If anything went wrong, the test failed.
1628  if causes:
1629  result.Fail("Unexpected %s." % string.join(causes, ", "))
1630  elif os.WIFSIGNALED(exit_status):
1631  # The target program terminated with a signal. Construe
1632  # that as a test failure.
1633  signal_number = str(os.WTERMSIG(exit_status))
1634  if not stack_trace:
1635  result.Fail("Program terminated by signal.")
1636  else:
1637  # The presence of stack_trace means that we stopped the job because
1638  # of a time-out
1639  result.Fail("Exceeded time limit (%ds), terminated." % timeout)
1640  result["ExecTest.signal_number"] = signal_number
1641  result["ExecTest.stdout"] = result.Quote(e.stdout)
1642  result["ExecTest.stderr"] = result.Quote(e.stderr)
1643  if self.signal:
1644  result["ExecTest.expected_signal_number"] = str(self.signal)
1645  elif os.WIFSTOPPED(exit_status):
1646  # The target program was stopped. Construe that as a
1647  # test failure.
1648  signal_number = str(os.WSTOPSIG(exit_status))
1649  if not stack_trace:
1650  result.Fail("Program stopped by signal.")
1651  else:
1652  # The presence of stack_trace means that we stopped the job because
1653  # of a time-out
1654  result.Fail("Exceeded time limit (%ds), stopped." % timeout)
1655  result["ExecTest.signal_number"] = signal_number
1656  result["ExecTest.stdout"] = result.Quote(e.stdout)
1657  result["ExecTest.stderr"] = result.Quote(e.stderr)
1658  else:
1659  # The target program terminated abnormally in some other
1660  # manner. (This shouldn't normally happen...)
1661  result.Fail("Program did not terminate normally.")
1662 
1663  # Marco Cl.: This is a special trick to fix a "problem" with the output
1664  # of Gaudi jobs when they use colors
1665  esc = '\x1b'
1666  repr_esc = '\\x1b'
1667  result["ExecTest.stdout"] = result["ExecTest.stdout"].replace(esc,repr_esc)
1668  # TODO: (MCl) improve the hack for colors in standard output
1669  # maybe converting them to HTML tags
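 # For example (illustrative), a colored line such as '\x1b[1mWARNING\x1b[0m'
 # is stored in the result as '\\x1b[1mWARNING\\x1b[0m', so the raw escape
 # character never reaches the generated reports.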
1670 
1671  def _CreateEclipseLaunch(self, prog, args, destdir = None):
1672  if 'NO_ECLIPSE_LAUNCHERS' in os.environ:
1673  # do not generate eclipse launchers if the user asks so
1674  return
1675  # Find the project name used in Eclipse.
1676  # The name is in a file called ".project" in one of the parent directories
1677  projbasedir = os.path.normpath(destdir)
1678  while not os.path.exists(os.path.join(projbasedir, ".project")):
1679  oldprojdir = projbasedir
1680  projbasedir = os.path.normpath(os.path.join(projbasedir, os.pardir))
1681  # FIXME: the root level is invariant when trying to go up one level,
1682  # but it must be checked on Windows
1683  if oldprojdir == projbasedir:
1684  # If we cannot find a .project file, there is no point in creating a .launch file
1685  return
1686  # Ensure that we have a place to write to.
1687  if not os.path.exists(destdir):
1688  os.makedirs(destdir)
1689  # Use ElementTree to parse the XML file
1690  from xml.etree import ElementTree as ET
1691  t = ET.parse(os.path.join(projbasedir, ".project"))
1692  projectName = t.find("name").text
1693 
1694  # prepare the name/path of the generated file
1695  destfile = "%s.launch" % self._Runnable__id
1696  if destdir:
1697  destfile = os.path.join(destdir, destfile)
1698 
1699  if self.options.strip():
1700  # this means we have some custom options in the qmt file, so we have
1701  # to copy them from the temporary file (the last argument) into
1702  # another file
1703  tempfile = args.pop()
1704  optsfile = destfile + os.path.splitext(tempfile)[1]
1705  shutil.copyfile(tempfile, optsfile)
1706  args.append(optsfile)
1707 
1708  # prepare the data to insert in the XML file
1709  from xml.sax.saxutils import quoteattr # useful to quote XML special chars
1710  data = {}
1711  # Note: the "quoteattr(k)" is not needed because special chars cannot be part of a variable name,
1712  # but it does no harm.
1713  data["environment"] = "\n".join(['<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
1714  for k, v in os.environ.iteritems()
1715  if k not in ('MAKEOVERRIDES', 'MAKEFLAGS', 'MAKELEVEL')])
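 # For instance (illustrative), an entry like PATH=/usr/bin:/usr/local/bin
 # becomes '<mapEntry key="PATH" value="/usr/bin:/usr/local/bin"/>';
 # quoteattr() also escapes quotes and '&' that may appear in the values.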
1716 
1717  data["exec"] = which(prog) or prog
1718  if os.path.basename(data["exec"]).lower().startswith("python"):
1719  data["stopAtMain"] = "false" # do not stop at main when debugging Python scripts
1720  else:
1721  data["stopAtMain"] = "true"
1722 
1723  data["args"] = "&#10;".join(map(rationalizepath, args))
1724  if self.isWinPlatform():
1725  data["args"] = "&#10;".join(["/debugexe"] + map(rationalizepath, [data["exec"]] + args))
1726  data["exec"] = which("vcexpress.exe")
1727 
1728  if not self.use_temp_dir:
1729  data["workdir"] = os.getcwd()
1730  else:
1731  # If the test is using a temporary directory, it is better to run it
1732  # in the same directory as the .launch file when debugging in Eclipse
1733  data["workdir"] = destdir
1734 
1735  data["project"] = projectName.strip()
1736 
1737  # Template for the XML file, based on Eclipse 3.4
1738  xml_template = u"""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
1739 <launchConfiguration type="org.eclipse.cdt.launch.applicationLaunchType">
1740 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB" value="true"/>
1741 <listAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB_LIST"/>
1742 <stringAttribute key="org.eclipse.cdt.debug.mi.core.DEBUG_NAME" value="gdb"/>
1743 <stringAttribute key="org.eclipse.cdt.debug.mi.core.GDB_INIT" value=".gdbinit"/>
1744 <listAttribute key="org.eclipse.cdt.debug.mi.core.SOLIB_PATH"/>
1745 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.STOP_ON_SOLIB_EVENTS" value="false"/>
1746 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.breakpointsFullPath" value="false"/>
1747 <stringAttribute key="org.eclipse.cdt.debug.mi.core.commandFactory" value="org.eclipse.cdt.debug.mi.core.standardCommandFactory"/>
1748 <stringAttribute key="org.eclipse.cdt.debug.mi.core.protocol" value="mi"/>
1749 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.verboseMode" value="false"/>
1750 <intAttribute key="org.eclipse.cdt.launch.ATTR_BUILD_BEFORE_LAUNCH_ATTR" value="0"/>
1751 <stringAttribute key="org.eclipse.cdt.launch.COREFILE_PATH" value=""/>
1752 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_ID" value="org.eclipse.cdt.debug.mi.core.CDebuggerNew"/>
1753 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_REGISTER_GROUPS" value=""/>
1754 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_START_MODE" value="run"/>
1755 <booleanAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN" value="%(stopAtMain)s"/>
1756 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN_SYMBOL" value="main"/>
1757 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_REGISTER_BOOKKEEPING" value="false"/>
1758 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_VARIABLE_BOOKKEEPING" value="false"/>
1759 <stringAttribute key="org.eclipse.cdt.launch.FORMAT" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&lt;contentList/&gt;"/>
1760 <stringAttribute key="org.eclipse.cdt.launch.GLOBAL_VARIABLES" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;globalVariableList/&gt;&#10;"/>
1761 <stringAttribute key="org.eclipse.cdt.launch.MEMORY_BLOCKS" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;memoryBlockExpressionList/&gt;&#10;"/>
1762 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_ARGUMENTS" value="%(args)s"/>
1763 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_NAME" value="%(exec)s"/>
1764 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_ATTR" value="%(project)s"/>
1765 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_BUILD_CONFIG_ID_ATTR" value=""/>
1766 <stringAttribute key="org.eclipse.cdt.launch.WORKING_DIRECTORY" value="%(workdir)s"/>
1767 <booleanAttribute key="org.eclipse.cdt.launch.ui.ApplicationCDebuggerTab.DEFAULTS_SET" value="true"/>
1768 <booleanAttribute key="org.eclipse.cdt.launch.use_terminal" value="true"/>
1769 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
1770 <listEntry value="/%(project)s"/>
1771 </listAttribute>
1772 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
1773 <listEntry value="4"/>
1774 </listAttribute>
1775 <booleanAttribute key="org.eclipse.debug.core.appendEnvironmentVariables" value="false"/>
1776 <mapAttribute key="org.eclipse.debug.core.environmentVariables">
1777 %(environment)s
1778 </mapAttribute>
1779 <mapAttribute key="org.eclipse.debug.core.preferred_launchers">
1780 <mapEntry key="[debug]" value="org.eclipse.cdt.cdi.launch.localCLaunch"/>
1781 </mapAttribute>
1782 <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
1783 <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
1784 </listAttribute>
1785 </launchConfiguration>
1786 """
1787  try:
1788  # ensure the correct encoding of data values
1789  for k in data:
1790  data[k] = codecs.decode(data[k], 'utf-8')
1791  xml = xml_template % data
1792 
1793  # Write the output file
1794  codecs.open(destfile, "w", encoding='utf-8').write(xml)
1795  except:
1796  print 'WARNING: problem generating Eclipse launcher'
1797 
1798 
1799 try:
1800  import json
1801 except ImportError:
1802  # Use simplejson for LCG
1803  import simplejson as json
1804 
1805 class HTMLResultStream(ResultStream):
1806  """An 'HTMLResultStream' writes its output to a set of HTML files.
1807 
1808  The argument 'dir' is used to select the destination directory for the HTML
1809  report.
1810  The destination directory may already contain the report from a previous run
1811  (for example of a different package), in which case it will be extended to
1812  include the new data.
1813  """
1814  arguments = [
1815  qm.fields.TextField(
1816  name = "dir",
1817  title = "Destination Directory",
1818  description = """The name of the directory.
1819 
1820  All results will be written to the directory indicated.""",
1821  verbatim = "true",
1822  default_value = ""),
1823  ]
1824 
1825  def __init__(self, arguments = None, **args):
1826  """Prepare the destination directory.
1827 
1828  Creates the destination directory and stores in it some preliminary
1829  annotations and the static files found in the template directory
1830  'html_report'.
1831  """
1832  ResultStream.__init__(self, arguments, **args)
1833  self._summary = []
1834  self._summaryFile = os.path.join(self.dir, "summary.json")
1835  self._annotationsFile = os.path.join(self.dir, "annotations.json")
1836  # Prepare the destination directory using the template
1837  templateDir = os.path.join(os.path.dirname(__file__), "html_report")
1838  if not os.path.isdir(self.dir):
1839  os.makedirs(self.dir)
1840  # Copy the files in the template directory excluding the directories
1841  for f in os.listdir(templateDir):
1842  src = os.path.join(templateDir, f)
1843  dst = os.path.join(self.dir, f)
1844  if not os.path.isdir(src) and not os.path.exists(dst):
1845  shutil.copy(src, dst)
1846  # Add some non-QMTest attributes
1847  if "CMTCONFIG" in os.environ:
1848  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
1849  import socket
1850  self.WriteAnnotation("hostname", socket.gethostname())
1851 
1852  def _updateSummary(self):
1853  """Helper function to extend the global summary file in the destination
1854  directory.
1855  """
1856  if os.path.exists(self._summaryFile):
1857  oldSummary = json.load(open(self._summaryFile))
1858  else:
1859  oldSummary = []
1860  ids = set([ i["id"] for i in self._summary ])
1861  newSummary = [ i for i in oldSummary if i["id"] not in ids ]
1862  newSummary.extend(self._summary)
1863  json.dump(newSummary, open(self._summaryFile, "w"),
1864  sort_keys = True)
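 # Merge semantics (illustrative): if summary.json already contains entries
 # for "Pkg.test_a" and "Pkg.test_b" and the current run produced a new
 # result for "Pkg.test_a", the old "Pkg.test_a" entry is replaced while
 # "Pkg.test_b" is preserved.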
1865 
1866  def WriteAnnotation(self, key, value):
1867  """Writes the annotation to the annotation file.
1868  If the key is already present with a different value, the value becomes
1869  a list and the new value is appended to it, except for start_time and
1870  end_time.
1871  """
1872  # Initialize the annotation dict from the file (if present)
1873  if os.path.exists(self._annotationsFile):
1874  annotations = json.load(open(self._annotationsFile))
1875  else:
1876  annotations = {}
1877  # hack because we do not have proper JSON support
1878  key, value = map(str, [key, value])
1879  if key == "qmtest.run.start_time":
1880  # Special handling of the start time:
1881  # if we are updating a result, we have to keep the original start
1882  # time, but remove the original end time to mark the report to be
1883  # in progress.
1884  if key not in annotations:
1885  annotations[key] = value
1886  if "qmtest.run.end_time" in annotations:
1887  del annotations["qmtest.run.end_time"]
1888  else:
1889  # All other annotations are added to a list
1890  if key in annotations:
1891  old = annotations[key]
1892  if type(old) is list:
1893  if value not in old:
1894  annotations[key].append(value)
1895  elif value != old:
1896  annotations[key] = [old, value]
1897  else:
1898  annotations[key] = value
1899  # Write the new annotations file
1900  json.dump(annotations, open(self._annotationsFile, "w"),
1901  sort_keys = True)
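 # Example (illustrative, with a hypothetical key):
 #   self.WriteAnnotation("some.key", "a")
 #   self.WriteAnnotation("some.key", "b")
 # leaves annotations["some.key"] == ["a", "b"] in annotations.json, while
 # writing the same value twice does not duplicate it.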
1902 
1903  def WriteResult(self, result):
1904  """Prepare the test result directory in the destination directory storing
1905  the result fields into it.
1906  A summary of the test result is stored both in a file in the test directory
1907  and in the global summary file.
1908  """
1909  summary = {}
1910  summary["id"] = result.GetId()
1911  summary["outcome"] = result.GetOutcome()
1912  summary["cause"] = result.GetCause()
1913  summary["fields"] = result.keys()
1914  summary["fields"].sort()
1915 
1916  # Since we miss proper JSON support, I hack a bit
1917  for f in ["id", "outcome", "cause"]:
1918  summary[f] = str(summary[f])
1919  summary["fields"] = map(str, summary["fields"])
1920 
1921  self._summary.append(summary)
1922 
1923  # format:
1924  # testname/summary.json
1925  # testname/field1
1926  # testname/field2
1927  testOutDir = os.path.join(self.dir, summary["id"])
1928  if not os.path.isdir(testOutDir):
1929  os.makedirs(testOutDir)
1930  json.dump(summary, open(os.path.join(testOutDir, "summary.json"), "w"),
1931  sort_keys = True)
1932  for f in summary["fields"]:
1933  open(os.path.join(testOutDir, f), "w").write(result[f])
1934 
1935  self._updateSummary()
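 # Resulting layout (illustrative), e.g. for a test with id "MyPkg.mytest":
 #   <dir>/MyPkg.mytest/summary.json      JSON summary (id, outcome, cause, fields)
 #   <dir>/MyPkg.mytest/ExecTest.stdout   one file per result field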
1936 
1937  def Summarize(self):
1938  # Not implemented.
1939  pass
1940 
1941 
1942 
1943 
1944 class XMLResultStream(ResultStream):
1945  """An 'XMLResultStream' writes its output to a CTest XML file.
1946 
1947  The argument 'dir' is used to select the destination directory for the XML
1948  report.
1949  The destination directory may already contain the report from a previous run
1950  (for example of a different package), in which case it will be overwritten
1951  with the new data.
1952  """
1953  arguments = [
1954  qm.fields.TextField(
1955  name = "dir",
1956  title = "Destination Directory",
1957  description = """The name of the directory.
1958 
1959  All results will be written to the directory indicated.""",
1960  verbatim = "true",
1961  default_value = ""),
1962  qm.fields.TextField(
1963  name = "prefix",
1964  title = "Output File Prefix",
1965  description = """The output file name will be the specified prefix
1966  followed by 'Test.xml' (CTest convention).""",
1967  verbatim = "true",
1968  default_value = ""),
1969  ]
1970 
1971  def __init__(self, arguments = None, **args):
1972  """Prepare the destination directory.
1973 
1974  Creates the destination directory and stores in it some preliminary
1975  annotations.
1976  """
1977  ResultStream.__init__(self, arguments, **args)
1978 
1979  self._xmlFile = os.path.join(self.dir, self.prefix + 'Test.xml')
1980 
1981  # add some global variables
1982  self._startTime = None
1983  self._endTime = None
1984  # Initialise the XML file if it does not exist
1985  if not os.path.isfile(self._xmlFile):
1986  # check that the container directory exists and create it if not
1987  if not os.path.exists(os.path.dirname(self._xmlFile)):
1988  os.makedirs(os.path.dirname(self._xmlFile))
1989 
1990  newdataset = ET.Element("newdataset")
1991  self._tree = ET.ElementTree(newdataset)
1992  self._tree.write(self._xmlFile)
1993  else :
1994  # Read the xml file
1995  self._tree = ET.parse(self._xmlFile)
1996  newdataset = self._tree.getroot()
1997 
1998  # Find the corresponding site; if it does not exist, create it
1999 
2000  #site = newdataset.find('Site[@BuildStamp="'+result["qmtest.start_time"]+'"][@OSPlatform="'+os.getenv("CMTOPT")+'"]')
2001  # This syntax does not work here, most likely because the available
2002  # ElementTree version does not support attribute predicates, so we fall back to an explicit loop:
2003  for site in newdataset.getiterator() :
2004  if site.get("OSPlatform") == os.uname()[4]: # and site.get("BuildStamp") == result["qmtest.start_time"] and:
2005  # Here we could check more attributes to distinguish between two sites
2006  self._site = site
2007  break
2008  else :
2009  site = None
2010 
2011 
2012  if site is None :
2013  import socket
2014  import multiprocessing
2015  attrib = {
2016  "BuildName": os.getenv("CMTCONFIG"),
2017  "Name": os.uname()[1],
2018  "Generator": "QMTest " + qm.version,
2019  "OSName": os.uname()[0],
2020  "Hostname": socket.gethostname(),
2021  "OSRelease": os.uname()[2],
2022  "OSVersion": os.uname()[3],
2023  "OSPlatform": os.uname()[4],
2024  "Is64Bits": "unknown",
2025  "VendorString": "unknown",
2026  "VendorID": "unknown",
2027  "FamilyID": "unknown",
2028  "ModelID": "unknown",
2029  "ProcessorCacheSize": "unknown",
2030  "NumberOfLogicalCPU": str(multiprocessing.cpu_count()),
2031  "NumberOfPhysicalCPU": "0",
2032  "TotalVirtualMemory": "0",
2033  "TotalPhysicalMemory": "0",
2034  "LogicalProcessorsPerPhysical": "0",
2035  "ProcessorClockFrequency": "0",
2036  }
2037  self._site = ET.SubElement(newdataset, "Site", attrib)
2038  self._Testing = ET.SubElement(self._site,"Testing")
2039 
2040  # Start time elements
2041  self._StartDateTime = ET.SubElement(self._Testing, "StartDateTime")
2042 
2043  self._StartTestTime = ET.SubElement(self._Testing, "StartTestTime")
2044 
2045 
2046  self._TestList = ET.SubElement(self._Testing, "TestList")
2047 
2048  # End time elements
2049  self._EndDateTime = ET.SubElement(self._Testing, "EndDateTime")
2050 
2051 
2052  self._EndTestTime = ET.SubElement(self._Testing, "EndTestTime")
2053 
2054 
2055 
2056  self._ElapsedMinutes = ET.SubElement(self._Testing, "ElapsedMinutes")
2057 
2058 
2059  else : # Retrieve the existing elements
2060  self._Testing = self._site.find("Testing")
2061  self._StartDateTime = self._Testing.find("StartDateTime")
2062  self._StartTestTime = self._Testing.find("StartTestTime")
2063  self._TestList = self._Testing.find("TestList")
2064  self._EndDateTime = self._Testing.find("EndDateTime")
2065  self._EndTestTime = self._Testing.find("EndTestTime")
2066  self._ElapsedMinutes = self._Testing.find("ElapsedMinutes")
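 # Sketch of the CTest file layout produced/updated by this stream:
 # <newdataset>
 #   <Site BuildName="..." Name="..." OSPlatform="..." ...>
 #     <Testing>
 #       <StartDateTime/> <StartTestTime/> <TestList/>
 #       <Test Status="passed"> <Name>...</Name> <Results>...</Results> </Test>
 #       <EndDateTime/> <EndTestTime/> <ElapsedMinutes/>
 #     </Testing>
 #   </Site>
 # </newdataset>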
2067 
2068  """
2069  # Add some non-QMTest attributes
2070  if "CMTCONFIG" in os.environ:
2071  self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
2072  import socket
2073  self.WriteAnnotation("hostname", socket.gethostname())
2074  """
2075 
2076 
2077  def WriteAnnotation(self, key, value):
2078  if key == "qmtest.run.start_time":
2079  if self._site.get("qmtest.run.start_time") is not None :
2080  return None
2081  self._site.set(str(key),str(value))
2082  def WriteResult(self, result):
2083  """Add the result of a test to the CTest XML tree, storing the result
2084  fields as named measurements, and update the report file on disk.
2085  """
2088  summary = {}
2089  summary["id"] = result.GetId()
2090  summary["outcome"] = result.GetOutcome()
2091  summary["cause"] = result.GetCause()
2092  summary["fields"] = result.keys()
2093  summary["fields"].sort()
2094 
2095 
2096  # Since we miss proper JSON support, I hack a bit
2097  for f in ["id", "outcome", "cause"]:
2098  summary[f] = str(summary[f])
2099  summary["fields"] = map(str, summary["fields"])
2100 
2101 
2102  # format
2103  # package_Test.xml
2104 
2105  haveStartDate = "qmtest.start_time" in summary["fields"]
2106  haveEndDate = "qmtest.end_time" in summary["fields"]
2113 
2114  # writing the start date time
2115  if haveStartDate:
2116  self._startTime = calendar.timegm(time.strptime(result["qmtest.start_time"], "%Y-%m-%dT%H:%M:%SZ"))
2117  if self._StartTestTime.text is None:
2118  self._StartDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._startTime))
2119  self._StartTestTime.text = str(self._startTime)
2120  self._site.set("BuildStamp" , result["qmtest.start_time"] )
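 # Illustrative example: a value of "2014-01-01T12:00:00Z" is parsed by
 # time.strptime() and converted by calendar.timegm() to the UTC epoch
 # time 1388577600.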
2121 
2122  #Save the end date time in memory
2123  if haveEndDate:
2124  self._endTime = calendar.timegm(time.strptime(result["qmtest.end_time"], "%Y-%m-%dT%H:%M:%SZ"))
2125 
2126 
2127  #add the current test to the test list
2128  tl = ET.Element("Test")
2129  tl.text = summary["id"]
2130  self._TestList.insert(0,tl)
2131 
2132  #Fill the current test
2133  Test = ET.Element("Test")
2134  if summary["outcome"] == "PASS":
2135  Test.set("Status", "passed")
2136  elif summary["outcome"] == "FAIL":
2137  Test.set("Status", "failed")
2138  elif summary["outcome"] == "SKIPPED" or summary["outcome"] == "UNTESTED":
2139  Test.set("Status", "skipped")
2140  elif summary["outcome"] == "ERROR":
2141  Test.set("Status", "failed")
2142  Name = ET.SubElement(Test, "Name",)
2143  Name.text = summary["id"]
2144  Results = ET.SubElement(Test, "Results")
2145 
2146  # add the test after the other tests
2147  self._Testing.insert(3,Test)
2148 
2149  if haveStartDate and haveEndDate:
2150  # Compute the test duration
2151  delta = self._endTime - self._startTime
2152  testduration = str(delta)
2153  Testduration= ET.SubElement(Results,"NamedMeasurement")
2154  Testduration.set("name","Execution Time")
2155  Testduration.set("type","numeric/float" )
2156  value = ET.SubElement(Testduration, "Value")
2157  value.text = testduration
2158 
2159  #remove the fields that we store in a different way
2160  for n in ("qmtest.end_time", "qmtest.start_time", "qmtest.cause", "ExecTest.stdout"):
2161  if n in summary["fields"]:
2162  summary["fields"].remove(n)
2163 
2164  # Here we can add the NamedMeasurement elements whose type we know
2165  #
2166  if "ExecTest.exit_code" in summary["fields"] :
2167  summary["fields"].remove("ExecTest.exit_code")
2168  ExitCode= ET.SubElement(Results,"NamedMeasurement")
2169  ExitCode.set("name","exit_code")
2170  ExitCode.set("type","numeric/integer" )
2171  value = ET.SubElement(ExitCode, "Value")
2172  value.text = convert_xml_illegal_chars(result["ExecTest.exit_code"])
2173 
2174  TestStartTime= ET.SubElement(Results,"NamedMeasurement")
2175  TestStartTime.set("name","Start_Time")
2176  TestStartTime.set("type","String" )
2177  value = ET.SubElement(TestStartTime, "Value")
2178  if haveStartDate :
2179  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._startTime)))
2180  else :
2181  value.text = ""
2182 
2183  TestEndTime= ET.SubElement(Results,"NamedMeasurement")
2184  TestEndTime.set("name","End_Time")
2185  TestEndTime.set("type","String" )
2186  value = ET.SubElement(TestEndTime, "Value")
2187  if haveEndDate :
2188  value.text = escape_xml_illegal_chars(time.strftime("%b %d %H:%M %Z %Y", time.localtime(self._endTime)))
2189  else :
2190  value.text = ""
2191 
2192  if summary["cause"]:
2193  FailureCause= ET.SubElement(Results,"NamedMeasurement")
2194  FailureCause.set("name", "Cause")
2195  FailureCause.set("type", "String" )
2196  value = ET.SubElement(FailureCause, "Value")
2197  value.text = escape_xml_illegal_chars(summary["cause"])
2198 
2199  #Fill the result
2200  fields = {}
2201  for field in summary["fields"] :
2202  fields[field] = ET.SubElement(Results, "NamedMeasurement")
2203  fields[field].set("type","String")
2204  fields[field].set("name",field)
2205  value = ET.SubElement(fields[field], "Value")
2206  # strip the enclosing <pre></pre> tags
2207  if "<pre>" in result[field][0:6] :
2208  value.text = convert_xml_illegal_chars(result[field][5:-6])
2209  else :
2210  value.text = convert_xml_illegal_chars(result[field])
2211 
2212 
2213  if result.has_key("ExecTest.stdout" ) : #"ExecTest.stdout" in result :
2214  Measurement = ET.SubElement(Results, "Measurement")
2215  value = ET.SubElement(Measurement, "Value")
2216  if "<pre>" in result["ExecTest.stdout"][0:6] :
2217  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"][5:-6])
2218  else :
2219  value.text = convert_xml_illegal_chars(result["ExecTest.stdout"])
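 # Example (illustrative): a field stored as '<pre>some output</pre>' is
 # written to the XML as 'some output' (the [5:-6] slice drops the
 # surrounding <pre>/</pre> tags) after replacing characters that are
 # illegal in XML.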
2220 
2221 
2222  # write the file
2223  self._tree.write(self._xmlFile, "utf-8") #,True) in python 2.7 to add the xml header
2224 
2225 
2226  def Summarize(self):
2227 
2228  # Set the final end date time
2229  self._EndTestTime.text = str(self._endTime)
2230  self._EndDateTime.text = time.strftime("%b %d %H:%M %Z", time.localtime(self._endTime))
2231 
2232  # Compute the total duration
2233  if self._endTime and self._startTime:
2234  delta = self._endTime - self._startTime
2235  else:
2236  delta = 0
2237  self._ElapsedMinutes.text = str(delta/60)
2238 
2239  # Write into the file
2240  self._tree.write(self._xmlFile, "utf-8") #,True) in python 2.7 to add the xml header
2241 