Gaudi Framework, version v22r0


GaudiTest.py

00001 ########################################################################
00002 # File:   GaudiTest.py
00003 # Author: Marco Clemencic CERN/PH-LBC
00004 ########################################################################
00005 __author__  = 'Marco Clemencic CERN/PH-LBC'
00006 ########################################################################
00007 # Imports
00008 ########################################################################
00009 import os
00010 import sys
00011 import re
00012 import tempfile
00013 import shutil
00014 import string
00015 import difflib
00016 from subprocess import Popen, PIPE, STDOUT
00017 
00018 import qm
00019 from qm.test.classes.command import ExecTestBase
00020 from qm.test.result_stream import ResultStream
00021 
00022 ### Needed by the re-implementation of TimeoutExecutable
00023 import qm.executable
00024 import time, signal
00025 # The classes in this module are implemented differently depending on
00026 # the operating system in use.
00027 if sys.platform == "win32":
00028     import msvcrt
00029     import pywintypes
00030     from   threading import *
00031     import win32api
00032     import win32con
00033     import win32event
00034     import win32file
00035     import win32pipe
00036     import win32process
00037 else:
00038     import cPickle
00039     import fcntl
00040     import select
00041     import qm.sigmask
00042 
00043 ########################################################################
00044 # Utility Classes
00045 ########################################################################
00046 class TemporaryEnvironment:
00047     """
00048     Class to change the environment temporarily.
00049     """
00050     def __init__(self, orig = os.environ, keep_same = False):
00051         """
00052         Create a temporary environment on top of the one specified
00053         (it can be another TemporaryEnvironment instance).
00054         """
00055         #print "New environment"
00056         self.old_values = {}
00057         self.env = orig
00058         self._keep_same = keep_same
00059 
00060     def __setitem__(self,key,value):
00061         """
00062         Set an environment variable recording the previous value.
00063         """
00064         if key not in self.old_values :
00065             if key in self.env :
00066                 if not self._keep_same or self.env[key] != value:
00067                     self.old_values[key] = self.env[key]
00068             else:
00069                 self.old_values[key] = None
00070         self.env[key] = value
00071 
00072     def __getitem__(self,key):
00073         """
00074         Get an environment variable.
00075         Needed to provide the same interface as os.environ.
00076         """
00077         return self.env[key]
00078 
00079     def __delitem__(self,key):
00080         """
00081         Unset an environment variable.
00082         Needed to provide the same interface as os.environ.
00083         """
00084         if key not in self.env :
00085             raise KeyError(key)
00086         self.old_values[key] = self.env[key]
00087         del self.env[key]
00088 
00089     def keys(self):
00090         """
00091         Return the list of defined environment variables.
00092         Needed to provide the same interface as os.environ.
00093         """
00094         return self.env.keys()
00095 
00096     def items(self):
00097         """
00098         Return the list of (name,value) pairs for the defined environment variables.
00099         Needed to provide the same interface as os.environ.
00100         """
00101         return self.env.items()
00102 
00103     def __contains__(self,key):
00104         """
00105         Operator 'in'.
00106         Needed to provide the same interface as os.environ.
00107         """
00108         return key in self.env
00109 
00110     def restore(self):
00111         """
00112         Revert all the changes done to the original environment.
00113         """
00114         for key,value in self.old_values.items():
00115             if value is None:
00116                 del self.env[key]
00117             else:
00118                 self.env[key] = value
00119         self.old_values = {}
00120 
00121     def __del__(self):
00122         """
00123         Revert the changes on destruction.
00124         """
00125         #print "Restoring the environment"
00126         self.restore()
00127 
00128     def gen_script(self,shell_type):
00129         """
00130         Generate a shell script to reproduce the changes in the environment.
00131         """
00132         shells = [ 'csh', 'sh', 'bat' ]
00133         if shell_type not in shells:
00134             raise RuntimeError("Shell type '%s' unknown. Available: %s"%(shell_type,shells))
00135         out = ""
00136         for key,value in self.old_values.items():
00137             if key not in self.env:
00138                 # unset variable
00139                 if shell_type == 'csh':
00140                     out += 'unsetenv %s\n'%key
00141                 elif shell_type == 'sh':
00142                     out += 'unset %s\n'%key
00143                 elif shell_type == 'bat':
00144                     out += 'set %s=\n'%key
00145             else:
00146                 # set variable
00147                 if shell_type == 'csh':
00148                     out += 'setenv %s "%s"\n'%(key,self.env[key])
00149                 elif shell_type == 'sh':
00150                     out += 'export %s="%s"\n'%(key,self.env[key])
00151                 elif shell_type == 'bat':
00152                     out += 'set %s=%s\n'%(key,self.env[key])
00153         return out
00154 
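# A minimal usage sketch (doctest-style; the variable name is hypothetical).
# Changes are recorded so that they can be undone or exported as a shell script:
#
#   >>> env = TemporaryEnvironment()
#   >>> env["GAUDI_TEST_VAR"] = "1"        # set, remembering the old value
#   >>> print env.gen_script("sh"),
#   export GAUDI_TEST_VAR="1"
#   >>> env.restore()                      # put os.environ back as it was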
00155 class TempDir:
00156     """Small class for temporary directories.
00157     When instantiated, it creates a temporary directory and the instance
00158     behaves as the string containing the directory name.
00159     When the instance goes out of scope, it removes all the content of
00160     the temporary directory (automatic clean-up).
00161     """
00162     def __init__(self, keep = False, chdir = False):
00163         self.name = tempfile.mkdtemp()
00164         self._keep = keep
00165         self._origdir = None
00166         if chdir:
00167             self._origdir = os.getcwd()
00168             os.chdir(self.name)
00169 
00170     def __str__(self):
00171         return self.name
00172 
00173     def __del__(self):
00174         if self._origdir:
00175             os.chdir(self._origdir)
00176         if self.name and not self._keep:
00177             shutil.rmtree(self.name)
00178 
00179     def __getattr__(self,attr):
00180         return getattr(self.name,attr)
00181 
00182 class TempFile:
00183     """Small class for temporary files.
00184     When instantiated, it creates a temporary file and the instance
00185     behaves as the string containing the file name.
00186     When the instance goes out of scope, it removes the temporary file
00187     (automatic clean-up).
00188     """
00189     def __init__(self, suffix='', prefix='tmp', dir=None, text=False, keep = False):
00190         self.file = None
00191         self.name = None
00192         self._keep = keep
00193 
00194         self._fd, self.name = tempfile.mkstemp(suffix,prefix,dir,text)
00195         self.file = os.fdopen(self._fd,"r+")
00196 
00197     def __str__(self):
00198         return self.name
00199 
00200     def __del__(self):
00201         if self.file:
00202             self.file.close()
00203         if self.name and not self._keep:
00204             os.remove(self.name)
00205 
00206     def __getattr__(self,attr):
00207         return getattr(self.file,attr)
00208 
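# A minimal usage sketch (hypothetical file name): both helpers clean up after
# themselves when the instance is garbage collected.
#
#   tmp = TempDir()                             # new temporary directory
#   optfile = os.path.join(str(tmp), "opts.py") # usable as a plain string
#   log = TempFile()                            # new (open) temporary file
#   log.write("some text")                      # attribute access is forwarded
#   log.flush()                                 # ... to the underlying file object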
00209 class CMT:
00210     """Small wrapper to call CMT.
00211     """
00212     def __init__(self,path=None):
00213         if path is None:
00214             path = os.getcwd()
00215         self.path = path
00216 
00217     def _run_cmt(self,command,args):
00218         # prepare command line
00219         if type(args) is str:
00220             args = [args]
00221         cmd = "cmt %s"%command
00222         for arg in args:
00223             cmd += ' "%s"'%arg
00224 
00225         # go to the execution directory
00226         olddir = os.getcwd()
00227         os.chdir(self.path)
00228         # run cmt
00229         result = os.popen4(cmd)[1].read()
00230         # return to the old directory
00231         os.chdir(olddir)
00232         return result
00233 
00234     def __getattr__(self,attr):
00235         return lambda args=[]: self._run_cmt(attr, args)
00236 
00237     def runtime_env(self,env = None):
00238         """Returns a dictionary containing the runtime environment produced by CMT.
00239     If a dictionary is passed, a modified instance of it is returned.
00240         """
00241         if env is None:
00242             env = {}
00243         for l in self.setup("-csh").splitlines():
00244             l = l.strip()
00245             if l.startswith("setenv"):
00246                 dummy,name,value = l.split(None,3)
00247                 env[name] = value.strip('"')
00248             elif l.startswith("unsetenv"):
00249                 dummy,name = l.split(None,2)
00250                 if name in env:
00251                     del env[name]
00252         return env
00253     def show_macro(self,k):
00254         r = self.show(["macro",k])
00255         if r.find("CMT> Error: symbol not found") >= 0:
00256             return None
00257         else:
00258             return self.show(["macro_value",k]).strip()
00259 
00260 ## Locates an executable in the executable search path ($PATH) and returns
00261 #  the full path to it.
00262 #  If the executable cannot be found, None is returned.
00263 def which(executable):
00264     if os.path.isabs(executable):
00265         return executable
00266     for d in os.environ.get("PATH").split(os.pathsep):
00267         fullpath = os.path.join(d,executable)
00268         if os.path.exists(fullpath):
00269             return fullpath
00270     return None
00271 
00272 def rationalizepath(p):
00273     p = os.path.normpath(os.path.expandvars(p))
00274     if os.path.exists(p):
00275         p = os.path.realpath(p)
00276     return p
00277 
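# A minimal usage sketch (assuming a CMT package in the current directory and
# a hypothetical macro name):
#
#   cmt = CMT()                                  # wrap 'cmt' for this directory
#   env = cmt.runtime_env()                      # environment from 'cmt setup -csh'
#   ver = cmt.show_macro("GaudiKernel_version")  # None if the macro is not defined
#   exe = which("Gaudi.exe")                     # full path, or None if not in $PATH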
00278 ########################################################################
00279 # Output Validation Classes
00280 ########################################################################
00281 class BasicOutputValidator:
00282     """Basic implementation of an output validator for Gaudi tests.
00283     This implementation is based on the standard (LCG) validation functions
00284     used in QMTest.
00285     """
00286     def __init__(self,ref,cause,result_key):
00287         self.reference = ref
00288         self.cause = cause
00289         self.result_key = result_key
00290 
00291     def __call__(self, out, result):
00292         """Validate the output of the program.
00293 
00294         'out' -- A string containing the data written to the stream
00295         being validated (standard output or standard error, depending
00296         on how the validator is used).
00297 
00298         'result' -- A 'Result' object. It may be used to annotate
00299         the outcome according to the content of the stream.
00302 
00303         returns -- A list of strings giving causes of failure."""
00304 
00305         causes = []
00306         # Check to see if the output matches.
00307         if not self.__CompareText(out, self.reference):
00308             causes.append(self.cause)
00309             result[self.result_key] = result.Quote(self.reference)
00310 
00311         return causes
00312 
00313     def __CompareText(self, s1, s2):
00314         """Compare 's1' and 's2', ignoring line endings.
00315 
00316         's1' -- A string.
00317 
00318         's2' -- A string.
00319 
00320         returns -- True if 's1' and 's2' are the same, ignoring
00321         differences in line endings."""
00322 
00323         # The "splitlines" method works independently of the line ending
00324         # convention in use.
00325         return s1.splitlines() == s2.splitlines()
00326 
00327 class FilePreprocessor:
00328     """ Base class for a callable that takes a file and returns a modified
00329     version of it."""
00330     def __processLine__(self, line):
00331         return line
00332     def __call__(self, input):
00333         if hasattr(input,"__iter__"):
00334             lines = input
00335             mergeback = False
00336         else:
00337             lines = input.splitlines()
00338             mergeback = True
00339         output = []
00340         for l in lines:
00341             l = self.__processLine__(l)
00342             if l: output.append(l)
00343         if mergeback: output = '\n'.join(output)
00344         return output
00345     def __add__(self, rhs):
00346         return FilePreprocessorSequence([self,rhs])
00347 
00348 class FilePreprocessorSequence(FilePreprocessor):
00349     def __init__(self, members = []):
00350         self.members = members
00351     def __add__(self, rhs):
00352         return FilePreprocessorSequence(self.members + [rhs])
00353     def __call__(self, input):
00354         output = input
00355         for pp in self.members:
00356             output = pp(output)
00357         return output
00358 
00359 class LineSkipper(FilePreprocessor):
00360     def __init__(self, strings = [], regexps = []):
00361         import re
00362         self.strings = strings
00363         self.regexps = map(re.compile,regexps)
00364 
00365     def __processLine__(self, line):
00366         for s in self.strings:
00367             if line.find(s) >= 0: return None
00368         for r in self.regexps:
00369             if r.search(line): return None
00370         return line
00371 
00372 class BlockSkipper(FilePreprocessor):
00373     def __init__(self, start, end):
00374         self.start = start
00375         self.end = end
00376         self._skipping = False
00377 
00378     def __processLine__(self, line):
00379         if self.start in line:
00380             self._skipping = True
00381             return None
00382         elif self.end in line:
00383             self._skipping = False
00384         elif self._skipping:
00385             return None
00386         return line
00387 
00388 class RegexpReplacer(FilePreprocessor):
00389     def __init__(self, orig, repl = "", when = None):
00390         if when:
00391             when = re.compile(when)
00392         self._operations = [ (when, re.compile(orig), repl) ]
00393     def __add__(self,rhs):
00394         if isinstance(rhs, RegexpReplacer):
00395             res = RegexpReplacer("","",None)
00396             res._operations = self._operations + rhs._operations
00397         else:
00398             res = FilePreprocessor.__add__(self, rhs)
00399         return res
00400     def __processLine__(self, line):
00401         for w,o,r in self._operations:
00402             if w is None or w.search(line):
00403                 line = o.sub(r, line)
00404         return line
00405 
00406 # Common preprocessors
00407 maskPointers  = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
00408 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9] *(CES?T)?",
00409                                "00:00:00 1970-01-01")
00410 normalizeEOL = FilePreprocessor()
00411 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
00412 
00413 skipEmptyLines = FilePreprocessor()
00414 # FIXME: that's ugly
00415 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
00416 
00417 ## Special preprocessor sorting the list of strings (whitespace separated)
00418 #  that follow a signature on a single line
00419 class LineSorter(FilePreprocessor):
00420     def __init__(self, signature):
00421         self.signature = signature
00422         self.siglen = len(signature)
00423     def __processLine__(self, line):
00424         pos = line.find(self.signature)
00425         if pos >=0:
00426             lst = line[(pos+self.siglen):].split()
00427             line = line[:(pos+self.siglen)]
00428             lst.sort()
00429             line += " ".join(lst)
00430         return line
00431 
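# A short doctest-style sketch of how these preprocessors behave (assuming the
# definitions above, with LineSorter sorting the tail of the line as described):
#
#   >>> maskPointers("ToolSvc         INFO object at 0x1a2b3c4d")
#   'ToolSvc         INFO object at 0x########'
#   >>> LineSorter("Services to release : ")("Services to release : B A C")
#   'Services to release : A B C'
#   >>> (maskPointers + LineSorter("x: "))("x: b a 0xdeadbeef")
#   'x: 0x######## a b'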
00432 # Preprocessors for GaudiExamples
00433 normalizeExamples = maskPointers + normalizeDate
00434 for w,o,r in [
00435               #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
00436               ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
00437               ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
00438               ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
00439               ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
00440               ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
00441               # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
00442               (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
00443               # Absorb a change in ServiceLocatorHelper
00444               ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
00445               # Remove the leading 0 in Windows' exponential format
00446               (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
00447               ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
00448     normalizeExamples += RegexpReplacer(o,r,w)
00449 normalizeExamples = LineSkipper(["//GP:",
00450                                  "Time User",
00451                                  "Welcome to",
00452                                  "This machine has a speed",
00453                                  "TIME:",
00454                                  "running on",
00455                                  "ToolSvc.Sequenc...   INFO",
00456                                  "DataListenerSvc      INFO XML written to file:",
00457                                  "[INFO]","[WARNING]",
00458                                  "DEBUG No writable file catalog found which contains FID:",
00459                                  "0 local", # hack for ErrorLogExample
00460                                  "DEBUG Service base class initialized successfully", # changed between v20 and v21
00461                                  "DEBUG Incident  timing:", # introduced with patch #3487
00462                                  # This comes from ROOT, when using GaudiPython
00463                                  'Note: (file "(tmpfile)", line 2) File "set" already loaded',
00464                                  # The signal handler complains about SIGXCPU not defined on some platforms
00465                                  'SIGXCPU',
00466                                  ],regexps = [
00467                                  r"^#", # Ignore python comments
00468                                  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
00469                                  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
00470                                  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
00471                                  r"File '.*.xml' does not exist",
00472                                  r"INFO Refer to dataset .* by its file ID:",
00473                                  r"INFO Referring to dataset .* by its file ID:",
00474                                  r"INFO Disconnect from dataset",
00475                                  r"INFO Disconnected from dataset",
00476                                  r"INFO Disconnected data IO:",
00477                                  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
00478                                  # I want to ignore the header of the unchecked StatusCode report
00479                                  r"^StatusCodeSvc.*listing all unchecked return codes:",
00480                                  r"^StatusCodeSvc\s*INFO\s*$",
00481                                  r"Num\s*\|\s*Function\s*\|\s*Source Library",
00482                                  r"^[-+]*\s*$",
00483                                  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
00484                                  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
00485                                  # Hide unchecked StatusCodes from dictionaries
00486                                  r"^ +[0-9]+ \|.*ROOT",
00487                                  r"^ +[0-9]+ \|.*\|.*Dict",
00488                                  # Remove ROOT TTree summary table, which changes from one version to the other
00489                                  r"^\*.*\*$",
00490                                  # Remove Histos Summaries
00491                                  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
00492                                  r"^ \|",
00493                                  r"^ ID=",
00494                                  ] ) + normalizeExamples + skipEmptyLines + \
00495                                   normalizeEOL + \
00496                                   LineSorter("Services to release : ")
00497 
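# The composite 'normalizeExamples' preprocessor is what the reference-file
# validation below applies to both the reference and the captured output.
# A minimal sketch (hypothetical file names):
#
#   out = normalizeExamples(open("run.log").read())    # hypothetical log file
#   ref = normalizeExamples(open("run.ref").read())    # hypothetical reference
#   diffs = list(difflib.ndiff(ref.splitlines(), out.splitlines()))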
00498 class ReferenceFileValidator:
00499     def __init__(self, reffile, cause, result_key, preproc = normalizeExamples):
00500         self.reffile = os.path.expandvars(reffile)
00501         self.cause = cause
00502         self.result_key = result_key
00503         self.preproc = preproc
00504     def __call__(self, stdout, result):
00505         causes = []
00506         if os.path.isfile(self.reffile):
00507             orig = open(self.reffile).xreadlines()
00508             if self.preproc:
00509                 orig = self.preproc(orig)
00510         else:
00511             orig = []
00512 
00513         new = stdout.splitlines()
00514         if self.preproc:
00515             new = self.preproc(new)
00516         #open(self.reffile + ".test","w").writelines(new)
00517         diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
00518         filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
00519         #filterdiffs = [x.strip() for x in diffs]
00520         if filterdiffs:
00521             result[self.result_key] = result.Quote("\n".join(filterdiffs))
00522             result[self.result_key] += result.Quote("""
00523 Legend:
00524         -) reference file
00525         +) standard output of the test""")
00526             causes.append(self.cause)
00527 
00528         return causes
00529 
00530 ########################################################################
00531 # Useful validation functions
00532 ########################################################################
00533 def findReferenceBlock(reference, stdout, result, causes, signature_offset=0, signature=None,
00534                        id = None):
00535     """
00536     Given a block of text, try to find it in the output.
00537     The block has to be identified by a signature line. By default, the first
00538     line is used as signature, or the line pointed to by signature_offset. If
00539     signature_offset points outside the block, a signature line can be passed
00540     as the signature argument. Note: if 'signature' is None (the default), a
00541     negative signature_offset is interpreted as an index in a list (e.g. -1
00542     means the last line), otherwise it is interpreted as the number of lines
00543     before the first line of the block at which the signature must appear.
00544     The parameter 'id' allows one to distinguish between different calls to
00545     this function in the same validation code.
00546     """
00547     # split reference file, sanitize EOLs and remove empty lines
00548     reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
00549     if not reflines:
00550         raise RuntimeError("Empty (or null) reference")
00551     # the same on standard output
00552     outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
00553 
00554     res_field = "GaudiTest.RefBlock"
00555     if id:
00556         res_field += "_%s" % id
00557 
00558     if signature is None:
00559         if signature_offset < 0:
00560             signature_offset = len(reflines)+signature_offset
00561         signature = reflines[signature_offset]
00562     # find the reference block in the output file
00563     try:
00564         pos = outlines.index(signature)
00565         outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
00566         if reflines != outlines:
00567             msg = "standard output"
00568             # I do not want 2 messages in causes if the function is called twice
00569             if not msg in causes:
00570                 causes.append(msg)
00571             result[res_field + ".observed"] = result.Quote("\n".join(outlines))
00572     except ValueError:
00573         causes.append("missing signature")
00574     result[res_field + ".signature"] = result.Quote(signature)
00575     if len(reflines) > 1 or signature != reflines[0]:
00576         result[res_field + ".expected"] = result.Quote("\n".join(reflines))
00577 
00578     return causes
00579 
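# A minimal sketch of how this is typically called from a test's "validator"
# field, where stdout/result/causes are pre-bound (see GaudiExeTest below);
# the reference block shown is hypothetical:
#
#   findReferenceBlock("""
#       ApplicationMgr       INFO Application Manager Finalized successfully
#       ApplicationMgr       INFO Application Manager Terminated successfully
#       """, id = "finalize")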
00580 def countErrorLines(expected = {'ERROR':0, 'FATAL':0}, **kwargs):
00581     """
00582     Count the number of messages with the required severities (by default ERROR
00583     and FATAL) and check if their numbers match the expected ones (0 by default).
00584     The dictionary "expected" can be used to tune the number of errors and fatals
00585     allowed, or to limit the number of expected warnings, etc.
00586     """
00587     stdout = kwargs["stdout"]
00588     result = kwargs["result"]
00589     causes = kwargs["causes"]
00590 
00591     # prepare the dictionary to record the extracted lines
00592     errors = {}
00593     for sev in expected:
00594         errors[sev] = []
00595 
00596     outlines = stdout.splitlines()
00597     from math import log10
00598     fmt = "%%%dd - %%s" % (int(log10(len(outlines))+1))
00599 
00600     linecount = 0
00601     for l in outlines:
00602         linecount += 1
00603         words = l.split()
00604         if len(words) >= 2 and words[1] in errors:
00605             errors[words[1]].append(fmt%(linecount,l.rstrip()))
00606 
00607     for e in errors:
00608         if len(errors[e]) != expected[e]:
00609             causes.append('%s(%d)'%(e,len(errors[e])))
00610             result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
00611             result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
00612 
00613     return causes
00614 
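# A minimal sketch, again as used from a "validator" field with stdout, result
# and causes pre-bound; the tolerated counts are hypothetical:
#
#   countErrorLines()                                    # no ERROR or FATAL allowed
#   countErrorLines({'ERROR': 2, 'FATAL': 0, 'WARNING': 0})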
00615 
00616 def _parseTTreeSummary(lines, pos):
00617     """
00618     Parse the TTree summary table in lines, starting from pos.
00619     Returns a tuple with the dictionary of the digested information and the
00620     position of the first line after the summary.
00621     """
00622     result = {}
00623     i = pos + 1 # first line is a sequence of '*'
00624     count = len(lines)
00625 
00626     splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
00627     def parseblock(ll):
00628         r = {}
00629         cols = splitcols(ll[0])
00630         r["Name"], r["Title"] = cols[1:]
00631 
00632         cols = splitcols(ll[1])
00633         r["Entries"] = int(cols[1])
00634 
00635         sizes = cols[2].split()
00636         r["Total size"] = int(sizes[2])
00637         if sizes[-1] == "memory":
00638             r["File size"] = 0
00639         else:
00640             r["File size"] = int(sizes[-1])
00641 
00642         cols = splitcols(ll[2])
00643         sizes = cols[2].split()
00644         if cols[0] == "Baskets":
00645             r["Baskets"] = int(cols[1])
00646             r["Basket size"] = int(sizes[2])
00647         r["Compression"] = float(sizes[-1])
00648         return r
00649 
00650     if i < (count - 3) and lines[i].startswith("*Tree"):
00651         result = parseblock(lines[i:i+3])
00652         result["Branches"] = {}
00653         i += 4
00654         while i < (count - 3) and lines[i].startswith("*Br"):
00655             branch = parseblock(lines[i:i+3])
00656             result["Branches"][branch["Name"]] = branch
00657             i += 4
00658 
00659     return (result, i)
00660 
00661 def findTTreeSummaries(stdout):
00662     """
00663     Scan stdout to find ROOT TTree summaries and digest them.
00664     """
00665     stars = re.compile(r"^\*+$")
00666     outlines = stdout.splitlines()
00667     nlines = len(outlines)
00668     trees = {}
00669 
00670     i = 0
00671     while i < nlines: #loop over the output
00672         # look for the beginning of a summary table (a line of asterisks)
00673         while i < nlines and not stars.match(outlines[i]):
00674             i += 1
00675         if i < nlines:
00676             tree, i = _parseTTreeSummary(outlines, i)
00677             if tree:
00678                 trees[tree["Name"]] = tree
00679 
00680     return trees
00681 
00682 def cmpTreesDicts(reference, to_check, ignore = None):
00683     """
00684     Check that all the keys in reference are in to_check too, with the same value.
00685     If the value is a dict, the function is called recursively. to_check can
00686     contain more keys than reference; the extra ones will not be tested.
00687     The function returns at the first difference found.
00688     """
00689     fail_keys = []
00690     # filter the keys in the reference dictionary
00691     if ignore:
00692         ignore_re = re.compile(ignore)
00693         keys = [ key for key in reference if not ignore_re.match(key) ]
00694     else:
00695         keys = reference.keys()
00696     # loop over the keys (not ignored) in the reference dictionary
00697     for k in keys:
00698         if k in to_check: # the key must be in the dictionary to_check
00699             if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
00700                 # if both reference and to_check values are dictionaries, recurse
00701                 failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
00702             else:
00703                 # compare the two values
00704                 failed = to_check[k] != reference[k]
00705         else: # handle missing keys in the dictionary to check (i.e. failure)
00706             to_check[k] = None
00707             failed = True
00708         if failed:
00709             fail_keys.insert(0, k)
00710             break # exit from the loop at the first failure
00711     return fail_keys # return the list of keys bringing to the different values
00712 
00713 def getCmpFailingValues(reference, to_check, fail_path):
00714     c = to_check
00715     r = reference
00716     for k in fail_path:
00717         c = c.get(k,None)
00718         r = r.get(k,None)
00719         if c is None or r is None:
00720             break # one of the dictionaries is not deep enough
00721     return (fail_path, r, c)
00722 
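# A small doctest-style sketch of the dictionary comparison helpers:
#
#   >>> ref = {"Entries": 10}
#   >>> chk = {"Entries": 9, "Name": "t1"}     # extra keys are not checked
#   >>> failed = cmpTreesDicts(ref, chk)
#   >>> failed
#   ['Entries']
#   >>> getCmpFailingValues(ref, chk, failed)
#   (['Entries'], 10, 9)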
00723 # signature of the print-out of the histograms
00724 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
00725 
00726 def parseHistosSummary(lines, pos):
00727     """
00728     Extract the histogram information from the lines starting at pos.
00729     Returns the summary dictionary and the position of the first line after the block.
00730     """
00731     global h_count_re
00732     h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
00733     h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
00734 
00735     nlines = len(lines)
00736 
00737     # decode header
00738     m = h_count_re.search(lines[pos])
00739     name = m.group(1).strip()
00740     total = int(m.group(2))
00741     header = {}
00742     for k, v in [ x.split("=") for x in  m.group(3).split() ]:
00743         header[k] = int(v)
00744     pos += 1
00745     header["Total"] = total
00746 
00747     summ = {}
00748     while pos < nlines:
00749         m = h_table_head.search(lines[pos])
00750         if m:
00751             t, d = m.groups(1) # type and directory
00752             t = t.replace(" profile", "Prof")
00753             pos += 1
00754             if pos < nlines:
00755                 l = lines[pos]
00756             else:
00757                 l = ""
00758             cont = {}
00759             if l.startswith(" | ID"):
00760                 # table format
00761                 titles = [ x.strip() for x in l.split("|")][1:]
00762                 pos += 1
00763                 while pos < nlines and lines[pos].startswith(" |"):
00764                     l = lines[pos]
00765                     values = [ x.strip() for x in l.split("|")][1:]
00766                     hcont = {}
00767                     for i in range(len(titles)):
00768                         hcont[titles[i]] = values[i]
00769                     cont[hcont["ID"]] = hcont
00770                     pos += 1
00771             elif l.startswith(" ID="):
00772                 while pos < nlines and lines[pos].startswith(" ID="):
00773                     values = [ x.strip() for x in  h_short_summ.search(lines[pos]).groups() ]
00774                     cont[values[0]] = values
00775                     pos += 1
00776             else: # not interpreted
00777                 raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
00778             if not d in summ:
00779                 summ[d] = {}
00780             summ[d][t] = cont
00781             summ[d]["header"] = header
00782         else:
00783             break
00784     if not summ:
00785         # If the full table is not present, we use only the header
00786         summ[name] = {"header": header}
00787     return summ, pos
00788 
00789 def findHistosSummaries(stdout):
00790     """
00791     Scan stdout to find histogram summaries and digest them.
00792     """
00793     outlines = stdout.splitlines()
00794     nlines = len(outlines) - 1
00795     summaries = {}
00796     global h_count_re
00797 
00798     pos = 0
00799     while pos < nlines:
00800         summ = {}
00801         # find first line of block:
00802         match = h_count_re.search(outlines[pos])
00803         while pos < nlines and not match:
00804             pos += 1
00805             match = h_count_re.search(outlines[pos])
00806         if match:
00807             summ, pos = parseHistosSummary(outlines, pos)
00808         summaries.update(summ)
00809     return summaries
00810 
00811 class GaudiFilterExecutable(qm.executable.Filter):
00812     def __init__(self, input, timeout = -1):
00813         """Create a new 'Filter'.
00814 
00815         'input' -- The string containing the input to provide to the
00816         child process.
00817 
00818         'timeout' -- As for 'TimeoutExecutable.__init__'."""
00819 
00820         super(GaudiFilterExecutable, self).__init__(input, timeout)
00821         self.__input = input
00822         self.__timeout = timeout
00823         self.stack_trace_file = None
00824         # Temporary file to pass the stack trace from one process to the other
00825         # The file must be closed and reopened when needed to avoid conflicts
00826         # between the processes
00827         tmpf = tempfile.mkstemp()
00828         os.close(tmpf[0])
00829         self.stack_trace_file = tmpf[1] # remember only the name
00830 
00831     def __UseSeparateProcessGroupForChild(self):
00832         """Copied from TimeoutExecutable to allow the re-implementation of
00833            _HandleChild.
00834         """
00835         if sys.platform == "win32":
00836             # In Windows 2000 (or later), we should use "jobs" by
00837             # analogy with UNIX process groups.  However, that
00838             # functionality is not (yet) provided by the Python Win32
00839             # extensions.
00840             return 0
00841 
00842         return self.__timeout >= 0 or self.__timeout == -2
00843     ##
00844     # Needs to replace the ones from RedirectedExecutable and TimeoutExecutable
00845     def _HandleChild(self):
00846         """Code copied from both FilterExecutable and TimeoutExecutable.
00847         """
00848         # Close the pipe ends that we do not need.
00849         if self._stdin_pipe:
00850             self._ClosePipeEnd(self._stdin_pipe[0])
00851         if self._stdout_pipe:
00852             self._ClosePipeEnd(self._stdout_pipe[1])
00853         if self._stderr_pipe:
00854             self._ClosePipeEnd(self._stderr_pipe[1])
00855 
00856         # The pipes created by 'RedirectedExecutable' must be closed
00857         # before the monitor process (created by 'TimeoutExecutable')
00858         # is created.  Otherwise, if the child process dies, 'select'
00859         # in the parent will not return, because the monitor process may
00860         # still have one of the file descriptors open.
00861 
00862         super(qm.executable.TimeoutExecutable, self)._HandleChild()
00863 
00864         if self.__UseSeparateProcessGroupForChild():
00865             # Put the child into its own process group.  This step is
00866             # performed in both the parent and the child; therefore both
00867             # processes can safely assume that the creation of the process
00868             # group has taken place.
00869             child_pid = self._GetChildPID()
00870             try:
00871                 os.setpgid(child_pid, child_pid)
00872             except:
00873                 # The call to setpgid may fail if the child has exited,
00874                 # or has already called 'exec'.  In that case, we are
00875                 # guaranteed that the child has already put itself in the
00876                 # desired process group.
00877                 pass
00878             # Create the monitoring process.
00879             #
00880             # If the monitoring process is in parent's process group and
00881             # kills the child after waitpid has returned in the parent, we
00882             # may end up trying to kill a process group other than the one
00883             # that we intend to kill.  Therefore, we put the monitoring
00884             # process in the same process group as the child; that ensures
00885             # that the process group will persist until the monitoring
00886             # process kills it.
00887             self.__monitor_pid = os.fork()
00888             if self.__monitor_pid != 0:
00889                 # Make sure that the monitoring process is placed into the
00890                 # child's process group before the parent process calls
00891                 # 'waitpid'.  In this way, we are guaranteed that the monitor
00892                 # ends up in the same process group as the child.
00893                 os.setpgid(self.__monitor_pid, child_pid)
00894             else:
00895                 # Put the monitoring process into the child's process
00896                 # group.  We know the process group still exists at
00897                 # this point because either (a) we are in the process
00898                 # group, or (b) the parent has not yet called waitpid.
00899                 os.setpgid(0, child_pid)
00900 
00901                 # Close all open file descriptors.  They are not needed
00902                 # in the monitor process.  Furthermore, when the parent
00903                 # closes the write end of the stdin pipe to the child,
00904                 # we do not want the pipe to remain open; leaving the
00905                 # pipe open in the monitor process might cause the child
00906                 # to block waiting for additional input.
00907                 try:
00908                     max_fds = os.sysconf("SC_OPEN_MAX")
00909                 except:
00910                     max_fds = 256
00911                 for fd in xrange(max_fds):
00912                     try:
00913                         os.close(fd)
00914                     except:
00915                         pass
00916                 try:
00917                     if self.__timeout >= 0:
00918                         # Give the child time to run.
00919                         time.sleep (self.__timeout)
00920                         #######################################################
00921                         ### This is the interesting part: dump the stack trace to a file
00922                         if sys.platform == "linux2": # we should have /proc and gdb
00923                             cmd = ["gdb",
00924                                    os.path.join("/proc", str(child_pid), "exe"),
00925                                    str(child_pid),
00926                                    "-batch", "-n", "-x",
00927                                    "'%s'" % os.path.join(os.path.dirname(__file__), "stack-trace.gdb")]
00928                             # FIXME: I wanted to use subprocess.Popen, but it doesn't want to work
00929                             #        in this context.
00930                             o = os.popen(" ".join(cmd)).read()
00931                             open(self.stack_trace_file,"w").write(o)
00932                         #######################################################
00933 
00934                         # Kill all processes in the child process group.
00935                         os.kill(0, signal.SIGKILL)
00936                     else:
00937                         # This call to select will never terminate.
00938                         select.select ([], [], [])
00939                 finally:
00940                     # Exit.  This code is in a finally clause so that
00941                     # we are guaranteed to get here no matter what.
00942                     os._exit(0)
00943         elif self.__timeout >= 0 and sys.platform == "win32":
00944             # Create a monitoring thread.
00945             self.__monitor_thread = Thread(target = self.__Monitor)
00946             self.__monitor_thread.start()
00947 
00948     if sys.platform == "win32":
00949 
00950         def __Monitor(self):
00951             """Code copied from FilterExecutable.
00952             Kill the child if the timeout expires.
00953 
00954             This function is run in the monitoring thread."""
00955 
00956             # The timeout may be expressed as a floating-point value
00957             # on UNIX, but it must be an integer number of
00958             # milliseconds when passed to WaitForSingleObject.
00959             timeout = int(self.__timeout * 1000)
00960             # Wait for the child process to terminate or for the
00961             # timer to expire.
00962             result = win32event.WaitForSingleObject(self._GetChildPID(),
00963                                                     timeout)
00964             # If the timeout occurred, kill the child process.
00965             if result == win32con.WAIT_TIMEOUT:
00966                 self.Kill()
00967 
00968 ########################################################################
00969 # Test Classes
00970 ########################################################################
00971 class GaudiExeTest(ExecTestBase):
00972     """Standard Gaudi test.
00973     """
00974     arguments = [
00975         qm.fields.TextField(
00976             name="program",
00977             title="Program",
00978             not_empty_text=1,
00979             description="""The path to the program.
00980 
00981             This field indicates the path to the program.  If it is not
00982             an absolute path, the value of the 'PATH' environment
00983             variable will be used to search for the program.
00984             If not specified, $GAUDIEXE or Gaudi.exe are used.
00985             """
00986             ),
00987         qm.fields.SetField(qm.fields.TextField(
00988             name="args",
00989             title="Argument List",
00990             description="""The command-line arguments.
00991 
00992             If this field is left blank, the program is run without any
00993             arguments.
00994 
00995             Use this field to specify the option files.
00996 
00997             An implicit 0th argument (the path to the program) is added
00998             automatically."""
00999             )),
01000         qm.fields.TextField(
01001             name="options",
01002             title="Options",
01003             description="""Options to be passed to the application.
01004 
01005             This field allows one to pass a list of options to the main program
01006             without the need for a separate option file.
01007 
01008             The content of the field is written to a temporary file whose name
01009             is passed to the application as the last argument (appended to the
01010             field "Argument List").
01011             """,
01012             verbatim="true",
01013             multiline="true",
01014             default_value=""
01015             ),
01016         qm.fields.TextField(
01017             name="workdir",
01018             title="Working Directory",
01019             description="""Path to the working directory.
01020 
01021             If this field is left blank, the program will be run from the qmtest
01022             directory, otherwise from the directory specified.""",
01023             default_value=""
01024             ),
01025         qm.fields.TextField(
01026             name="reference",
01027             title="Reference Output",
01028             description="""Path to the file containing the reference output.
01029 
01030             If this field is left blank, any standard output will be considered
01031             valid.
01032 
01033             If the reference file is specified, any output on standard error is
01034             ignored."""
01035             ),
01036         qm.fields.TextField(
01037             name="error_reference",
01038             title="Reference for standard error",
01039             description="""Path to the file containing the reference for the standard error.
01040 
01041             If this field is left blank, any standard error will be considered
01042             valid.
01043 
01044             If the reference file is specified, the standard error of the test
01045             is compared against its content."""
01046             ),
01047         qm.fields.SetField(qm.fields.TextField(
01048             name = "unsupported_platforms",
01049             title = "Unsupported Platforms",
01050             description = """Platforms on which the test must not be run.
01051 
01052             List of regular expressions identifying the platforms on which the
01053             test is not run and the result is set to UNTESTED."""
01054             )),
01055 
01056         qm.fields.TextField(
01057             name = "validator",
01058             title = "Validator",
01059             description = """Function to validate the output of the test.
01060 
01061             If defined, the function is used to validate the products of the
01062             test.
01063             The function is called passing as arguments:
01064               self:   the test class instance
01065               stdout: the standard output of the executed test
01066               stderr: the standard error of the executed test
01067               result: the Result objects to fill with messages
01068             The function must return a list of causes for the failure.
01069             If specified, overrides standard output, standard error and
01070             reference files.
01071             """,
01072             verbatim="true",
01073             multiline="true",
01074             default_value=""
01075             ),
01076 
01077         qm.fields.BooleanField(
01078             name = "use_temp_dir",
01079             title = "Use temporary directory",
01080             description = """Use temporary directory.
01081 
01082             If set to true, use a temporary directory as working directory.
01083             """,
01084             default_value="false"
01085             ),
01086         ]
01087 
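    # A sketch of what the "validator" field may contain; it is executed as
    # Python code with the helpers pre-bound to stdout/stderr/result/causes
    # (see ValidateOutput below). The arguments shown are hypothetical:
    #
    #   countErrorLines({'ERROR': 0, 'FATAL': 0})
    #   findReferenceBlock("<expected block of output>", id = "final")
    #   validateWithReference()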
01088     def PlatformIsNotSupported(self, context, result):
01089         platform = self.GetPlatform()
01090         unsupported = [ re.compile(x)
01091                         for x in [ str(y).strip()
01092                                    for y in self.unsupported_platforms ]
01093                         if x
01094                        ]
01095         for p_re in unsupported:
01096             if p_re.search(platform):
01097                 result.SetOutcome(result.UNTESTED)
01098                 result[result.CAUSE] = 'Platform not supported.'
01099                 return True
01100         return False
01101 
01102     def GetPlatform(self):
01103         """
01104         Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
01105         """
01106         arch = "None"
01107         # check architecture name
01108         if "CMTCONFIG" in os.environ:
01109             arch = os.environ["CMTCONFIG"]
01110         elif "SCRAM_ARCH" in os.environ:
01111             arch = os.environ["SCRAM_ARCH"]
01112         return arch
01113 
01114     def _expandReferenceFileName(self, reffile):
01115         # if no file is passed, do nothing
01116         if not reffile:
01117             return ""
01118 
01119         reference = os.path.normpath(os.path.expandvars(reffile))
01120         # old-style platform-specific reference name
01121         spec_ref = reference[:-3] + self.GetPlatform()[0:3] + reference[-3:]
01122         if os.path.isfile(spec_ref):
01123             reference = spec_ref
01124         else: # look for new-style platform specific reference files:
01125             # get all the files whose name start with the reference filename
01126             dirname, basename = os.path.split(reference)
01127             if not dirname: dirname = '.'
01128             head = basename + "."
01129             head_len = len(head)
01130             platform = self.GetPlatform()
01131             candidates = []
01132             for f in os.listdir(dirname):
01133                 if f.startswith(head) and platform.startswith(f[head_len:]):
01134                     candidates.append( (len(f) - head_len, f) )
01135             if candidates: # take the one with highest matching
01136                 candidates.sort()
01137                 reference = os.path.join(dirname, candidates[-1][1])
01138         return reference
01139 
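    # Illustrative example (hypothetical names): with reference "test.ref" and
    # platform "x86_64-slc5-gcc43-opt", new-style candidates are files such as
    # "test.ref.x86_64-slc5"; the candidate whose suffix matches the longest
    # prefix of the platform string is preferred over the plain "test.ref".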
01140     def CheckTTreesSummaries(self, stdout, result, causes,
01141                              trees_dict = None,
01142                              ignore = r"Basket|.*size|Compression"):
01143         """
01144         Compare the TTree summaries in stdout with the ones in trees_dict or in
01145         the reference file. By default ignore the size, compression and basket
01146         fields.
01147         The presence of TTree summaries when none is expected is not a failure.
01148         """
01149         if trees_dict is None:
01150             reference = self._expandReferenceFileName(self.reference)
01151             # call the validator if the file exists
01152             if reference and os.path.isfile(reference):
01153                 trees_dict = findTTreeSummaries(open(reference).read())
01154             else:
01155                 trees_dict = {}
01156 
01157         from pprint import PrettyPrinter
01158         pp = PrettyPrinter()
01159         if trees_dict:
01160             result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
01161             if ignore:
01162                 result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
01163 
01164         trees = findTTreeSummaries(stdout)
01165         failed = cmpTreesDicts(trees_dict, trees, ignore)
01166         if failed:
01167             causes.append("trees summaries")
01168             msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
01169             result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
01170             result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
01171 
01172         return causes
01173 
01174     def CheckHistosSummaries(self, stdout, result, causes,
01175                              dict = None,
01176                              ignore = None):
01177         """
01178         Compare the histogram summaries in stdout with the ones in dict or in
01179         the reference file. By default nothing is ignored.
01180         The presence of histogram summaries when none is expected is not a
01181         failure.
01182         """
01183         if dict is None:
01184             reference = self._expandReferenceFileName(self.reference)
01185             # call the validator if the file exists
01186             if reference and os.path.isfile(reference):
01187                 dict = findHistosSummaries(open(reference).read())
01188             else:
01189                 dict = {}
01190 
01191         from pprint import PrettyPrinter
01192         pp = PrettyPrinter()
01193         if dict:
01194             result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
01195             if ignore:
01196                 result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
01197 
01198         histos = findHistosSummaries(stdout)
01199         failed = cmpTreesDicts(dict, histos, ignore)
01200         if failed:
01201             causes.append("histos summaries")
01202             msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
01203             result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
01204             result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
01205 
01206         return causes
01207 
01208     def ValidateWithReference(self, stdout, stderr, result, causes, preproc = None):
01209         """
01210         Default validation action: compare standard output and error to the
01211         reference files.
01212         """
01213         # set the default output preprocessor
01214         if preproc is None:
01215             preproc = normalizeExamples
01216         # check standard output
01217         reference = self._expandReferenceFileName(self.reference)
01218         # call the validator if the file exists
01219         if reference and os.path.isfile(reference):
01220             result["GaudiTest.output_reference"] = reference
01221             causes += ReferenceFileValidator(reference,
01222                                              "standard output",
01223                                              "GaudiTest.output_diff",
01224                                              preproc = preproc)(stdout, result)
01225 
01226         # Compare TTree summaries
01227         causes = self.CheckTTreesSummaries(stdout, result, causes)
01228         causes = self.CheckHistosSummaries(stdout, result, causes)
01229 
01230         if causes: # Write a new reference file for stdout
01231             try:
01232                 newref = open(reference + ".new","w")
01233                 # sanitize newlines
01234                 for l in stdout.splitlines():
01235                     newref.write(l.rstrip() + '\n')
01236                 del newref # flush and close
01237             except IOError:
01238                 # Ignore IO errors when trying to update reference files
01239                 # because we may be in a read-only filesystem
01240                 pass
01241 
01242         # check standard error
01243         reference = self._expandReferenceFileName(self.error_reference)
01244         # call the validator if we have a file to use
01245         if reference and os.path.isfile(reference):
01246             result["GaudiTest.error_reference"] = reference
01247             newcauses = ReferenceFileValidator(reference,
01248                                                "standard error",
01249                                                "GaudiTest.error_diff",
01250                                                preproc = preproc)(stderr, result)
01251             causes += newcauses
01252             if newcauses: # Write a new reference file for stderr
01253                 newref = open(reference + ".new","w")
01254                 # sanitize newlines
01255                 for l in stderr.splitlines():
01256                     newref.write(l.rstrip() + '\n')
01257                 del newref # flush and close
01258         else:
01259             causes += BasicOutputValidator(self.stderr,
01260                                            "standard error",
01261                                            "ExecTest.expected_stderr")(stderr, result)
01262 
01263         return causes
01264 
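    # Hypothetical usage from a test's custom validator (see ValidateOutput
    # below): the default reference comparison can be reused with a modified
    # preprocessor, assuming the preprocessors defined earlier in this module
    # (normalizeExamples, LineSkipper, ...) can be chained with '+'; the
    # skipped string is invented for illustration.
    #
    #   validateWithReference(preproc = normalizeExamples +
    #                                   LineSkipper(["Histograms converted"]))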
01265     def ValidateOutput(self, stdout, stderr, result):
01266         causes = []
01267         # if the test definition contains a custom validator, use it
01268         if self.validator.strip() != "":
01269             class CallWrapper(object):
01270                 """
01271                 Small wrapper class to dynamically bind some default arguments
01272                 to a callable.
01273                 """
01274                 def __init__(self, callable, extra_args = {}):
01275                     self.callable = callable
01276                     self.extra_args = extra_args
01277                     # get the list of names of positional arguments
01278                     from inspect import getargspec
01279                     self.args_order = getargspec(callable)[0]
01280                     # Remove "self" from the list of positional arguments
01281                     # since it is added automatically
01282                     if self.args_order[0] == "self":
01283                         del self.args_order[0]
01284                 def __call__(self, *args, **kwargs):
01285                     # Check which positional arguments are used
01286                     positional = self.args_order[:len(args)]
01287 
01288                     kwargs = dict(kwargs) # copy the arguments dictionary
01289                     for a in self.extra_args:
01290                         # use "extra_args" for the arguments not specified as
01291                         # positional or keyword
01292                         if a not in positional and a not in kwargs:
01293                             kwargs[a] = self.extra_args[a]
01294                     return self.callable(*args, **kwargs)
01295             # local names to be exposed in the script
01296             exported_symbols = {"self":self,
01297                                 "stdout":stdout,
01298                                 "stderr":stderr,
01299                                 "result":result,
01300                                 "causes":causes,
01301                                 "findReferenceBlock":
01302                                     CallWrapper(findReferenceBlock, {"stdout":stdout,
01303                                                                      "result":result,
01304                                                                      "causes":causes}),
01305                                 "validateWithReference":
01306                                     CallWrapper(self.ValidateWithReference, {"stdout":stdout,
01307                                                                              "stderr":stderr,
01308                                                                              "result":result,
01309                                                                              "causes":causes}),
01310                                 "countErrorLines":
01311                                     CallWrapper(countErrorLines, {"stdout":stdout,
01312                                                                   "result":result,
01313                                                                   "causes":causes}),
01314                                 "checkTTreesSummaries":
01315                                     CallWrapper(self.CheckTTreesSummaries, {"stdout":stdout,
01316                                                                             "result":result,
01317                                                                             "causes":causes}),
01318                                 "checkHistosSummaries":
01319                                     CallWrapper(self.CheckHistosSummaries, {"stdout":stdout,
01320                                                                             "result":result,
01321                                                                             "causes":causes}),
01322 
01323                                 }
01324             exec self.validator in globals(), exported_symbols
01325         else:
01326             self.ValidateWithReference(stdout, stderr, result, causes)
01327 
01328         return causes
01329 
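    # Hypothetical example of a custom validator snippet as it could appear in
    # the "validator" field of a .qmt test: the names used below are exactly
    # the ones exposed through 'exported_symbols' above; the expected text and
    # message are invented for illustration.
    #
    #   countErrorLines()
    #   findReferenceBlock("""
    #   ApplicationMgr       INFO Application Manager Finalized successfully
    #   """)
    #   if "all events processed" not in stdout:
    #       causes.append("missing final message")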
01330     def DumpEnvironment(self, result):
01331         """
01332         Add the content of the environment to the result object.
01333 
01334         Copied from the QMTest class of COOL.
01335         """
01336         vars = os.environ.keys()
01337         vars.sort()
01338         result['GaudiTest.environment'] = \
01339             result.Quote('\n'.join(["%s=%s"%(v,os.environ[v]) for v in vars]))
01340 
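    # The resulting annotation is a sorted "NAME=value" dump, e.g. (sketch,
    # values invented):
    #
    #   CMTCONFIG=x86_64-slc5-gcc43-opt
    #   PATH=/usr/local/bin:/usr/bin:/bin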
01341     def _find_program(self,prog):
01342         # check if it is an absolute path or the file can be found
01343         # from the local directory, otherwise search for it in PATH
01344         if not os.path.isabs(prog) and not os.path.isfile(prog):
01345             for d in os.environ["PATH"].split(os.pathsep):
01346                 p = os.path.join(d,prog)
01347                 if os.path.isfile(p):
01348                     return p
01349         return prog
01350 
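    # Resolution order illustrated (a sketch, not executed):
    #
    #   _find_program("/usr/bin/python")  -> returned as-is (absolute path)
    #   _find_program("myTest.exe")       -> returned as-is if it exists in the
    #                                        current directory, otherwise the
    #                                        first match found on $PATH, or the
    #                                        bare name if no match is found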
01351     def Run(self, context, result):
01352         """Run the test.
01353 
01354         'context' -- A 'Context' giving run-time parameters to the
01355         test.
01356 
01357         'result' -- A 'Result' object.  The outcome will be
01358         'Result.PASS' when this method is called.  The 'result' may be
01359         modified by this method to indicate outcomes other than
01360         'Result.PASS' or to add annotations."""
01361 
01362         # Check if the platform is supported
01363         if self.PlatformIsNotSupported(context, result):
01364             return
01365 
01366         # Prepare program name and arguments (expanding variables, and converting to absolute)
01367         if self.program:
01368             prog = rationalizepath(self.program)
01369         elif "GAUDIEXE" in os.environ:
01370             prog = os.environ["GAUDIEXE"]
01371         else:
01372             prog = "Gaudi.exe"
01373         self.program = prog
01374 
01375         dummy, prog_ext = os.path.splitext(prog)
01376         if prog_ext not in [ ".exe", ".py", ".bat" ] and self.GetPlatform()[0:3] == "win":
01377             prog += ".exe"
01378             prog_ext = ".exe"
01379 
01380         prog = self._find_program(prog)
01381 
01382         # Convert paths to absolute paths in arguments and reference files
01383         args = map(rationalizepath, self.args)
01384         self.reference = rationalizepath(self.reference)
01385         self.error_reference = rationalizepath(self.error_reference)
01386 
01387 
01388         # check if the user provided inline options
01389         tmpfile = None
01390         if self.options.strip():
01391             ext = ".opts"
01392             if re.search(r"from\s*Gaudi.Configuration\s*import\s*\*", self.options):
01393                 ext = ".py"
01394             tmpfile = TempFile(ext)
01395             tmpfile.writelines("\n".join(self.options.splitlines()))
01396             tmpfile.flush()
01397             args.append(tmpfile.name)
01398             result["GaudiTest.options"] = result.Quote(self.options)
01399 
01400         # if the program is a python file, execute it through python
01401         if prog_ext == ".py":
01402             args.insert(0,prog)
01403             if self.GetPlatform()[0:3] == "win":
01404                 prog = self._find_program("python.exe")
01405             else:
01406                 prog = self._find_program("python")
01407 
01408         # Change to the working directory, if specified, or to the default temporary directory
01409         origdir = os.getcwd()
01410         if self.workdir:
01411             os.chdir(str(os.path.normpath(os.path.expandvars(self.workdir))))
01412         elif self.use_temp_dir == "true":
01413             if "QMTEST_TMPDIR" in os.environ:
01414                 os.chdir(os.environ["QMTEST_TMPDIR"])
01415             elif "qmtest.tmpdir" in context:
01416                 os.chdir(context["qmtest.tmpdir"])
01417 
01418         if "QMTEST_IGNORE_TIMEOUT" not in os.environ:
01419             self.timeout = max(self.timeout,600)
01420         else:
01421             self.timeout = -1
01422 
01423         try:
01424             # Generate an Eclipse debug launcher for the test
01425             self._CreateEclipseLaunch(prog, args, destdir = origdir)
01426             # Run the test
01427             self.RunProgram(prog,
01428                             [ prog ] + args,
01429                             context, result)
01430             # Record the content of the environment for failing tests
01431             if result.GetOutcome() not in [ result.PASS ]:
01432                 self.DumpEnvironment(result)
01433         finally:
01434             # revert to the original directory
01435             os.chdir(origdir)
01436 
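    # Sketch of the inline-options handling performed in Run() (illustrative
    # only, the option text is invented): options that import from
    # Gaudi.Configuration are written to a temporary ".py" file, anything else
    # to a ".opts" file, and the file name is appended to the arguments.
    #
    #   options = "from Gaudi.Configuration import *\nApplicationMgr(EvtMax = 1)"
    #   ext = ".py" if re.search(r"from\s*Gaudi.Configuration\s*import\s*\*", options) else ".opts"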
01437     def RunProgram(self, program, arguments, context, result):
01438         """Run the 'program'.
01439 
01440         'program' -- The path to the program to run.
01441 
01442         'arguments' -- A list of the arguments to the program.  This
01443         list must contain a first argument corresponding to 'argv[0]'.
01444 
01445         'context' -- A 'Context' giving run-time parameters to the
01446         test.
01447 
01448         'result' -- A 'Result' object.  The outcome will be
01449         'Result.PASS' when this method is called.  The 'result' may be
01450         modified by this method to indicate outcomes other than
01451         'Result.PASS' or to add annotations.
01452 
01453         @attention: This method has been copied from command.ExecTestBase
01454                     (QMTest 2.3.0) and modified to keep stdout and stderr
01455                     for tests that have been terminated by a signal.
01456                     (Fundamental for debugging in the Application Area)
01457         """
01458 
01459         # Construct the environment.
01460         environment = self.MakeEnvironment(context)
01461         # Create the executable.
01462         if self.timeout >= 0:
01463             timeout = self.timeout
01464         else:
01465             # If no timeout was specified, we still run this process in a
01466             # separate process group and kill the entire process group
01467             # when the child is done executing.  That means that
01468             # orphaned child processes created by the test will be
01469             # cleaned up.
01470             timeout = -2
01471         e = GaudiFilterExecutable(self.stdin, timeout)
01472         # Run it.
01473         exit_status = e.Run(arguments, environment, path = program)
01474         # Get the stack trace from the temporary file (if present)
01475         if e.stack_trace_file and os.path.exists(e.stack_trace_file):
01476             stack_trace = open(e.stack_trace_file).read()
01477             os.remove(e.stack_trace_file)
01478         else:
01479             stack_trace = None
01480         if stack_trace:
01481             result["ExecTest.stack_trace"] = result.Quote(stack_trace)
01482 
01483         # If the process terminated normally, check the outputs.
01484         if sys.platform == "win32" or os.WIFEXITED(exit_status):
01485             # There are no causes of failure yet.
01486             causes = []
01487             # The target program terminated normally.  Extract the
01488             # exit code, if this test checks it.
01489             if self.exit_code is None:
01490                 exit_code = None
01491             elif sys.platform == "win32":
01492                 exit_code = exit_status
01493             else:
01494                 exit_code = os.WEXITSTATUS(exit_status)
01495             # Get the output generated by the program.
01496             stdout = e.stdout
01497             stderr = e.stderr
01498             # Record the results.
01499             result["ExecTest.exit_code"] = str(exit_code)
01500             result["ExecTest.stdout"] = result.Quote(stdout)
01501             result["ExecTest.stderr"] = result.Quote(stderr)
01502             # Check to see if the exit code matches.
01503             if exit_code != self.exit_code:
01504                 causes.append("exit_code")
01505                 result["ExecTest.expected_exit_code"] \
01506                     = str(self.exit_code)
01507             # Validate the output.
01508             causes += self.ValidateOutput(stdout, stderr, result)
01509             # If anything went wrong, the test failed.
01510             if causes:
01511                 result.Fail("Unexpected %s." % string.join(causes, ", "))
01512         elif os.WIFSIGNALED(exit_status):
01513             # The target program terminated with a signal.  Construe
01514             # that as a test failure.
01515             signal_number = str(os.WTERMSIG(exit_status))
01516             if not stack_trace:
01517                 result.Fail("Program terminated by signal.")
01518             else:
01519                 # The presence of stack_trace means that we stopped the job because
01520                 # of a time-out
01521                 result.Fail("Exceeded time limit (%ds), terminated." % timeout)
01522             result["ExecTest.signal_number"] = signal_number
01523             result["ExecTest.stdout"] = result.Quote(e.stdout)
01524             result["ExecTest.stderr"] = result.Quote(e.stderr)
01525         elif os.WIFSTOPPED(exit_status):
01526             # The target program was stopped.  Construe that as a
01527             # test failure.
01528             signal_number = str(os.WSTOPSIG(exit_status))
01529             if not stack_trace:
01530                 result.Fail("Program stopped by signal.")
01531             else:
01532                 # The presence of stack_trace means that we stopped the job because
01533                 # of a time-out
01534                 result.Fail("Exceeded time limit (%ds), stopped." % timeout)
01535             result["ExecTest.signal_number"] = signal_number
01536             result["ExecTest.stdout"] = result.Quote(e.stdout)
01537             result["ExecTest.stderr"] = result.Quote(e.stderr)
01538         else:
01539             # The target program terminated abnormally in some other
01540             # manner.  (This shouldn't normally happen...)
01541             result.Fail("Program did not terminate normally.")
01542 
01543         # Marco Cl.: This is a special trick to fix a "problem" with the output
01544         # of Gaudi jobs when they use colors
01545         esc = '\x1b'
01546         repr_esc = '\\x1b'
01547         result["ExecTest.stdout"] = result["ExecTest.stdout"].replace(esc,repr_esc)
01548         # TODO: (MCl) improve the hack for colors in standard output
01549         #             may be converting them to HTML tags
01550 
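    # How the raw 'exit_status' is decoded on POSIX platforms (a sketch of the
    # standard 'os' calls used above, not executed here):
    #
    #   if os.WIFEXITED(exit_status):      # normal termination
    #       code = os.WEXITSTATUS(exit_status)
    #   elif os.WIFSIGNALED(exit_status):  # killed by a signal (or by the timeout)
    #       sig = os.WTERMSIG(exit_status)
    #   elif os.WIFSTOPPED(exit_status):   # stopped by a signal
    #       sig = os.WSTOPSIG(exit_status)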
01551     def _CreateEclipseLaunch(self, prog, args, destdir = None):
01552         # Find the project name used in Eclipse.
01553         # The name is in a file called ".project" in one of the parent directories
01554         projbasedir = os.path.normpath(destdir)
01555         while not os.path.exists(os.path.join(projbasedir, ".project")):
01556             oldprojdir = projbasedir
01557             projbasedir = os.path.normpath(os.path.join(projbasedir, os.pardir))
01558             # FIXME: the root level is invariant when trying to go up one level,
01559             #        but this must be checked on Windows
01560             if oldprojdir == projbasedir:
01561                 # If we cannot find a .project file, there is no point in creating a .launch file
01562                 return
01563         # Use ElementTree to parse the XML file
01564         from xml.etree import ElementTree as ET
01565         t = ET.parse(os.path.join(projbasedir, ".project"))
01566         projectName = t.find("name").text
01567 
01568         # prepare the name/path of the generated file
01569         destfile = "%s.launch" % self._Runnable__id
01570         if destdir:
01571             destfile = os.path.join(destdir, destfile)
01572 
01573         if self.options.strip():
01574             # The qmt file provided custom options: copy them from the temporary
01575             # file appended as the last argument to a stable file next to the
01576             # .launch file
01577             tempfile = args.pop()
01578             optsfile = destfile + os.path.splitext(tempfile)[1]
01579             shutil.copyfile(tempfile, optsfile)
01580             args.append(optsfile)
01581 
01582         # prepare the data to insert in the XML file
01583         from xml.sax.saxutils import quoteattr # useful to quote XML special chars
01584         data = {}
01585         # Note: the "quoteattr(k)" is not needed because special chars cannot be part of a variable name,
01586         # but it does no harm.
01587         data["environment"] = "\n".join(['<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
01588                                          for k, v in os.environ.iteritems()])
01589 
01590         data["exec"] = which(prog)
01591         if os.path.basename(data["exec"]).lower().startswith("python"):
01592             data["stopAtMain"] = "false" # do not stop at main when debugging Python scripts
01593         else:
01594             data["stopAtMain"] = "true"
01595 
01596         data["args"] = "&#10;".join(map(rationalizepath, args))
01597 
01598         if self.use_temp_dir != "true":
01599             data["workdir"] = os.getcwd()
01600         else:
01601             # If the test is using a temporary directory, it is better to run it
01602             # in the same directory as the .launch file when debugged in Eclipse
01603             data["workdir"] = destdir
01604 
01605         data["project"] = projectName.strip()
01606 
01607         # Template for the XML file, based on eclipse 3.4
01608         xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
01609 <launchConfiguration type="org.eclipse.cdt.launch.applicationLaunchType">
01610 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB" value="true"/>
01611 <listAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB_LIST"/>
01612 <stringAttribute key="org.eclipse.cdt.debug.mi.core.DEBUG_NAME" value="gdb"/>
01613 <stringAttribute key="org.eclipse.cdt.debug.mi.core.GDB_INIT" value=".gdbinit"/>
01614 <listAttribute key="org.eclipse.cdt.debug.mi.core.SOLIB_PATH"/>
01615 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.STOP_ON_SOLIB_EVENTS" value="false"/>
01616 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.breakpointsFullPath" value="false"/>
01617 <stringAttribute key="org.eclipse.cdt.debug.mi.core.commandFactory" value="org.eclipse.cdt.debug.mi.core.standardCommandFactory"/>
01618 <stringAttribute key="org.eclipse.cdt.debug.mi.core.protocol" value="mi"/>
01619 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.verboseMode" value="false"/>
01620 <intAttribute key="org.eclipse.cdt.launch.ATTR_BUILD_BEFORE_LAUNCH_ATTR" value="0"/>
01621 <stringAttribute key="org.eclipse.cdt.launch.COREFILE_PATH" value=""/>
01622 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_ID" value="org.eclipse.cdt.debug.mi.core.CDebuggerNew"/>
01623 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_REGISTER_GROUPS" value=""/>
01624 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_START_MODE" value="run"/>
01625 <booleanAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN" value="%(stopAtMain)s"/>
01626 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN_SYMBOL" value="main"/>
01627 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_REGISTER_BOOKKEEPING" value="false"/>
01628 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_VARIABLE_BOOKKEEPING" value="false"/>
01629 <stringAttribute key="org.eclipse.cdt.launch.FORMAT" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&lt;contentList/&gt;"/>
01630 <stringAttribute key="org.eclipse.cdt.launch.GLOBAL_VARIABLES" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;globalVariableList/&gt;&#10;"/>
01631 <stringAttribute key="org.eclipse.cdt.launch.MEMORY_BLOCKS" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;memoryBlockExpressionList/&gt;&#10;"/>
01632 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_ARGUMENTS" value="%(args)s"/>
01633 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_NAME" value="%(exec)s"/>
01634 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_ATTR" value="%(project)s"/>
01635 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_BUILD_CONFIG_ID_ATTR" value=""/>
01636 <stringAttribute key="org.eclipse.cdt.launch.WORKING_DIRECTORY" value="%(workdir)s"/>
01637 <booleanAttribute key="org.eclipse.cdt.launch.ui.ApplicationCDebuggerTab.DEFAULTS_SET" value="true"/>
01638 <booleanAttribute key="org.eclipse.cdt.launch.use_terminal" value="true"/>
01639 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
01640 <listEntry value="/%(project)s"/>
01641 </listAttribute>
01642 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
01643 <listEntry value="4"/>
01644 </listAttribute>
01645 <booleanAttribute key="org.eclipse.debug.core.appendEnvironmentVariables" value="false"/>
01646 <mapAttribute key="org.eclipse.debug.core.environmentVariables">
01647 %(environment)s
01648 </mapAttribute>
01649 <mapAttribute key="org.eclipse.debug.core.preferred_launchers">
01650 <mapEntry key="[debug]" value="org.eclipse.cdt.cdi.launch.localCLaunch"/>
01651 </mapAttribute>
01652 <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
01653 <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
01654 </listAttribute>
01655 </launchConfiguration>
01656 """ % data
01657 
01658         # Write the output file
01659         open(destfile, "w").write(xml)
01660         #open(destfile + "_copy.xml", "w").write(xml)
01661 
01662 
01663 try:
01664     import json
01665 except ImportError:
01666     # Use simplejson for LCG
01667     import simplejson as json
01668 
01669 class HTMLResultStream(ResultStream):
01670     """An 'HTMLResultStream' writes its output to a set of HTML files.
01671 
01672     The argument 'dir' is used to select the destination directory for the HTML
01673     report.
01674     The destination directory may already contain the report from a previous run
01675     (for example of a different package), in which case it will be extended to
01676     include the new data.
01677     """
01678     arguments = [
01679         qm.fields.TextField(
01680             name = "dir",
01681             title = "Destination Directory",
01682             description = """The name of the directory.
01683 
01684             All results will be written to the directory indicated.""",
01685             verbatim = "true",
01686             default_value = ""),
01687     ]
01688 
01689     def __init__(self, arguments = None, **args):
01690         """Prepare the destination directory.
01691 
01692         Creates the destination directory and stores in it some preliminary
01693         annotations and the static files found in the template directory
01694         'html_report'.
01695         """
01696         ResultStream.__init__(self, arguments, **args)
01697         self._summary = []
01698         self._summaryFile = os.path.join(self.dir, "summary.json")
01699         self._annotationsFile = os.path.join(self.dir, "annotations.json")
01700         # Prepare the destination directory using the template
01701         templateDir = os.path.join(os.path.dirname(__file__), "html_report")
01702         if not os.path.isdir(self.dir):
01703             os.makedirs(self.dir)
01704         # Copy the files in the template directory excluding the directories
01705         for f in os.listdir(templateDir):
01706             src = os.path.join(templateDir, f)
01707             dst = os.path.join(self.dir, f)
01708             if not os.path.isdir(src) and not os.path.exists(dst):
01709                 shutil.copy(src, dst)
01710         # Add some non-QMTest attributes
01711         if "CMTCONFIG" in os.environ:
01712             self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
01713         import socket
01714         self.WriteAnnotation("hostname", socket.gethostname())
01715 
01716     def _updateSummary(self):
01717         """Helper function to extend the global summary file in the destination
01718         directory.
01719         """
01720         if os.path.exists(self._summaryFile):
01721             oldSummary = json.load(open(self._summaryFile))
01722         else:
01723             oldSummary = []
01724         ids = set([ i["id"] for i in self._summary ])
01725         newSummary = [ i for i in oldSummary if i["id"] not in ids ]
01726         newSummary.extend(self._summary)
01727         json.dump(newSummary, open(self._summaryFile, "w"),
01728                   sort_keys = True)
01729 
01730     def WriteAnnotation(self, key, value):
01731         """Writes the annotation to the annotation file.
01732         If the key is already present with a different value, the value becomes
01733         a list and the new value is appended to it, except for start_time and
01734         end_time.
01735         """
01736         # Initialize the annotation dict from the file (if present)
01737         if os.path.exists(self._annotationsFile):
01738             annotations = json.load(open(self._annotationsFile))
01739         else:
01740             annotations = {}
01741         # Work around the lack of proper JSON support: store keys and values as plain strings
01742         key, value = map(str, [key, value])
01743         if key == "qmtest.run.start_time":
01744             # Special handling of the start time:
01745             # if we are updating a result, we have to keep the original start
01746             # time, but remove the original end time to mark the report to be
01747             # in progress.
01748             if key not in annotations:
01749                 annotations[key] = value
01750             if "qmtest.run.end_time" in annotations:
01751                 del annotations["qmtest.run.end_time"]
01752         else:
01753             # All other annotations are added to a list
01754             if key in annotations:
01755                 old = annotations[key]
01756                 if type(old) is list:
01757                     if value not in old:
01758                         annotations[key].append(value)
01759                 elif value != old:
01760                     annotations[key] = [old, value]
01761             else:
01762                 annotations[key] = value
01763         # Write the new annotations file
01764         json.dump(annotations, open(self._annotationsFile, "w"),
01765                   sort_keys = True)
01766 
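    # Merge semantics illustrated (a sketch, not executed; the values are
    # invented): writing the same key twice with different values turns it
    # into a list, while 'qmtest.run.start_time' keeps its first value and
    # clears any previous 'qmtest.run.end_time'.
    #
    #   stream.WriteAnnotation("cmt.cmtconfig", "x86_64-slc5-gcc43-opt")
    #   stream.WriteAnnotation("cmt.cmtconfig", "i686-slc5-gcc43-opt")
    #   # annotations.json now contains:
    #   #   {"cmt.cmtconfig": ["x86_64-slc5-gcc43-opt", "i686-slc5-gcc43-opt"], ...}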
01767     def WriteResult(self, result):
01768         """Prepare the test result directory in the destination directory,
01769         storing in it the result fields.
01770         A summary of the test result is stored both in a file in the test directory
01771         and in the global summary file.
01772         """
01773         summary = {}
01774         summary["id"] = result.GetId()
01775         summary["outcome"] = result.GetOutcome()
01776         summary["cause"] = result.GetCause()
01777         summary["fields"] = result.keys()
01778         summary["fields"].sort()
01779 
01780         # Since we lack proper JSON support, convert the summary fields to plain strings
01781         for f in ["id", "outcome", "cause"]:
01782             summary[f] = str(summary[f])
01783         summary["fields"] = map(str, summary["fields"])
01784 
01785         self._summary.append(summary)
01786 
01787         # format:
01788         # testname/summary.json
01789         # testname/field1
01790         # testname/field2
01791         testOutDir = os.path.join(self.dir, summary["id"])
01792         if not os.path.isdir(testOutDir):
01793             os.makedirs(testOutDir)
01794         json.dump(summary, open(os.path.join(testOutDir, "summary.json"), "w"),
01795                   sort_keys = True)
01796         for f in summary["fields"]:
01797             open(os.path.join(testOutDir, f), "w").write(result[f])
01798 
01799         self._updateSummary()
01800 
01801     def Summarize(self):
01802         # Not implemented.
01803         pass
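
# Resulting layout of the destination directory written by HTMLResultStream
# (illustrative sketch, using a hypothetical test id "gaudiexamples.histograms"):
#
#   <dir>/summary.json                            # merged summary of all runs
#   <dir>/annotations.json                        # run-level annotations
#   <dir>/gaudiexamples.histograms/summary.json   # per-test summary
#   <dir>/gaudiexamples.histograms/ExecTest.stdout
#   <dir>/gaudiexamples.histograms/GaudiTest.output_diff
#   plus the static files copied from the 'html_report' template directory.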