Gaudi Framework, version v22r2

Home   Generated: Tue May 10 2011

GaudiTest.py

Go to the documentation of this file.
00001 ########################################################################
00002 # File:   GaudiTest.py
00003 # Author: Marco Clemencic CERN/PH-LBC
00004 ########################################################################
00005 __author__  = 'Marco Clemencic CERN/PH-LBC'
00006 ########################################################################
00007 # Imports
00008 ########################################################################
00009 import os
00010 import sys
00011 import re
00012 import tempfile
00013 import shutil
00014 import string
00015 import difflib
00016 from subprocess import Popen, PIPE, STDOUT
00017 
00018 import qm
00019 from qm.test.classes.command import ExecTestBase
00020 from qm.test.result_stream import ResultStream
00021 
00022 ### Needed by the re-implementation of TimeoutExecutable
00023 import qm.executable
00024 import time, signal
00025 # The classes in this module are implemented differently depending on
00026 # the operating system in use.
00027 if sys.platform == "win32":
00028     import msvcrt
00029     import pywintypes
00030     from   threading import *
00031     import win32api
00032     import win32con
00033     import win32event
00034     import win32file
00035     import win32pipe
00036     import win32process
00037 else:
00038     import cPickle
00039     import fcntl
00040     import select
00041     import qm.sigmask
00042 
00043 ########################################################################
00044 # Utility Classes
00045 ########################################################################
00046 class TemporaryEnvironment:
00047     """
00048     Class to changes the environment temporarily.
00049     """
00050     def __init__(self, orig = os.environ, keep_same = False):
00051         """
00052         Create a temporary environment on top of the one specified
00053         (it can be another TemporaryEnvironment instance).
00054         """
00055         #print "New environment"
00056         self.old_values = {}
00057         self.env = orig
00058         self._keep_same = keep_same
00059 
00060     def __setitem__(self,key,value):
00061         """
00062         Set an environment variable recording the previous value.
00063         """
00064         if key not in self.old_values :
00065             if key in self.env :
00066                 if not self._keep_same or self.env[key] != value:
00067                     self.old_values[key] = self.env[key]
00068             else:
00069                 self.old_values[key] = None
00070         self.env[key] = value
00071 
00072     def __getitem__(self,key):
00073         """
00074         Get an environment variable.
00075         Needed to provide the same interface as os.environ.
00076         """
00077         return self.env[key]
00078 
00079     def __delitem__(self,key):
00080         """
00081         Unset an environment variable.
00082         Needed to provide the same interface as os.environ.
00083         """
00084         if key not in self.env :
00085             raise KeyError(key)
00086         self.old_values[key] = self.env[key]
00087         del self.env[key]
00088 
00089     def keys(self):
00090         """
00091         Return the list of defined environment variables.
00092         Needed to provide the same interface as os.environ.
00093         """
00094         return self.env.keys()
00095 
00096     def items(self):
00097         """
00098         Return the list of (name,value) pairs for the defined environment variables.
00099         Needed to provide the same interface as os.environ.
00100         """
00101         return self.env.items()
00102 
00103     def __contains__(self,key):
00104         """
00105         Operator 'in'.
00106         Needed to provide the same interface as os.environ.
00107         """
00108         return key in self.env
00109 
00110     def restore(self):
00111         """
00112         Revert all the changes done to the orignal environment.
00113         """
00114         for key,value in self.old_values.items():
00115             if value is None:
00116                 del self.env[key]
00117             else:
00118                 self.env[key] = value
00119         self.old_values = {}
00120 
00121     def __del__(self):
00122         """
00123         Revert the changes on destruction.
00124         """
00125         #print "Restoring the environment"
00126         self.restore()
00127 
00128     def gen_script(self,shell_type):
00129         """
00130         Generate a shell script to reproduce the changes in the environment.
00131         """
00132         shells = [ 'csh', 'sh', 'bat' ]
00133         if shell_type not in shells:
00134             raise RuntimeError("Shell type '%s' unknown. Available: %s"%(shell_type,shells))
00135         out = ""
00136         for key,value in self.old_values.items():
00137             if key not in self.env:
00138                 # unset variable
00139                 if shell_type == 'csh':
00140                     out += 'unsetenv %s\n'%key
00141                 elif shell_type == 'sh':
00142                     out += 'unset %s\n'%key
00143                 elif shell_type == 'bat':
00144                     out += 'set %s=\n'%key
00145             else:
00146                 # set variable
00147                 if shell_type == 'csh':
00148                     out += 'setenv %s "%s"\n'%(key,self.env[key])
00149                 elif shell_type == 'sh':
00150                     out += 'export %s="%s"\n'%(key,self.env[key])
00151                 elif shell_type == 'bat':
00152                     out += 'set %s=%s\n'%(key,self.env[key])
00153         return out
00154 
00155 class TempDir:
00156     """Small class for temporary directories.
00157     When instantiated, it creates a temporary directory and the instance
00158     behaves as the string containing the directory name.
00159     When the instance goes out of scope, it removes all the content of
00160     the temporary directory (automatic clean-up).
00161     """
00162     def __init__(self, keep = False, chdir = False):
00163         self.name = tempfile.mkdtemp()
00164         self._keep = keep
00165         self._origdir = None
00166         if chdir:
00167             self._origdir = os.getcwd()
00168             os.chdir(self.name)
00169 
00170     def __str__(self):
00171         return self.name
00172 
00173     def __del__(self):
00174         if self._origdir:
00175             os.chdir(self._origdir)
00176         if self.name and not self._keep:
00177             shutil.rmtree(self.name)
00178 
00179     def __getattr__(self,attr):
00180         return getattr(self.name,attr)
00181 
00182 class TempFile:
00183     """Small class for temporary files.
00184     When instantiated, it creates a temporary directory and the instance
00185     behaves as the string containing the directory name.
00186     When the instance goes out of scope, it removes all the content of
00187     the temporary directory (automatic clean-up).
00188     """
00189     def __init__(self, suffix='', prefix='tmp', dir=None, text=False, keep = False):
00190         self.file = None
00191         self.name = None
00192         self._keep = keep
00193 
00194         self._fd, self.name = tempfile.mkstemp(suffix,prefix,dir,text)
00195         self.file = os.fdopen(self._fd,"r+")
00196 
00197     def __str__(self):
00198         return self.name
00199 
00200     def __del__(self):
00201         if self.file:
00202             self.file.close()
00203         if self.name and not self._keep:
00204             os.remove(self.name)
00205 
00206     def __getattr__(self,attr):
00207         return getattr(self.file,attr)
00208 
00209 class CMT:
00210     """Small wrapper to call CMT.
00211     """
00212     def __init__(self,path=None):
00213         if path is None:
00214             path = os.getcwd()
00215         self.path = path
00216 
00217     def _run_cmt(self,command,args):
00218         # prepare command line
00219         if type(args) is str:
00220             args = [args]
00221         cmd = "cmt %s"%command
00222         for arg in args:
00223             cmd += ' "%s"'%arg
00224 
00225         # go to the execution directory
00226         olddir = os.getcwd()
00227         os.chdir(self.path)
00228         # run cmt
00229         result = os.popen4(cmd)[1].read()
00230         # return to the old directory
00231         os.chdir(olddir)
00232         return result
00233 
00234     def __getattr__(self,attr):
00235         return lambda args=[]: self._run_cmt(attr, args)
00236 
00237     def runtime_env(self,env = None):
00238         """Returns a dictionary containing the runtime environment produced by CMT.
00239         If a dictionary is passed a modified instance of it is returned.
00240         """
00241         if env is None:
00242             env = {}
00243         for l in self.setup("-csh").splitlines():
00244             l = l.strip()
00245             if l.startswith("setenv"):
00246                 dummy,name,value = l.split(None,3)
00247                 env[name] = value.strip('"')
00248             elif l.startswith("unsetenv"):
00249                 dummy,name = l.split(None,2)
00250                 if name in env:
00251                     del env[name]
00252         return env
00253     def show_macro(self,k):
00254         r = self.show(["macro",k])
00255         if r.find("CMT> Error: symbol not found") >= 0:
00256             return None
00257         else:
00258             return self.show(["macro_value",k]).strip()
00259 
00260 ## Locates an executable in the executables path ($PATH) and returns the full
00261 #  path to it.
00262 #  If the executable cannot be found, None is returned
00263 def which(executable):
00264     if os.path.isabs(executable):
00265         return executable
00266     for d in os.environ.get("PATH").split(os.pathsep):
00267         fullpath = os.path.join(d,executable)
00268         if os.path.exists(fullpath):
00269             return fullpath
00270     return None
00271 
00272 def rationalizepath(p):
00273     p = os.path.normpath(os.path.expandvars(p))
00274     if os.path.exists(p):
00275         p = os.path.realpath(p)
00276     return p
00277 
00278 ########################################################################
00279 # Output Validation Classes
00280 ########################################################################
00281 class BasicOutputValidator:
00282     """Basic implementation of an option validator for Gaudi tests.
00283     This implementation is based on the standard (LCG) validation functions
00284     used in QMTest.
00285     """
00286     def __init__(self,ref,cause,result_key):
00287         self.reference = ref
00288         self.cause = cause
00289         self.result_key = result_key
00290 
00291     def __call__(self, out, result):
00292         """Validate the output of the program.
00293 
00294         'stdout' -- A string containing the data written to the standard output
00295         stream.
00296 
00297         'stderr' -- A string containing the data written to the standard error
00298         stream.
00299 
00300         'result' -- A 'Result' object. It may be used to annotate
00301         the outcome according to the content of stderr.
00302 
00303         returns -- A list of strings giving causes of failure."""
00304 
00305         causes = []
00306         # Check to see if theoutput matches.
00307         if not self.__CompareText(out, self.reference):
00308             causes.append(self.cause)
00309             result[self.result_key] = result.Quote(self.reference)
00310 
00311         return causes
00312 
00313     def __CompareText(self, s1, s2):
00314         """Compare 's1' and 's2', ignoring line endings.
00315 
00316         's1' -- A string.
00317 
00318         's2' -- A string.
00319 
00320         returns -- True if 's1' and 's2' are the same, ignoring
00321         differences in line endings."""
00322 
00323         # The "splitlines" method works independently of the line ending
00324         # convention in use.
00325         return s1.splitlines() == s2.splitlines()
00326 
00327 class FilePreprocessor:
00328     """ Base class for a callable that takes a file and returns a modified
00329     version of it."""
00330     def __processLine__(self, line):
00331         return line
00332     def __call__(self, input):
00333         if hasattr(input,"__iter__"):
00334             lines = input
00335             mergeback = False
00336         else:
00337             lines = input.splitlines()
00338             mergeback = True
00339         output = []
00340         for l in lines:
00341             l = self.__processLine__(l)
00342             if l: output.append(l)
00343         if mergeback: output = '\n'.join(output)
00344         return output
00345     def __add__(self, rhs):
00346         return FilePreprocessorSequence([self,rhs])
00347 
00348 class FilePreprocessorSequence(FilePreprocessor):
00349     def __init__(self, members = []):
00350         self.members = members
00351     def __add__(self, rhs):
00352         return FilePreprocessorSequence(self.members + [rhs])
00353     def __call__(self, input):
00354         output = input
00355         for pp in self.members:
00356             output = pp(output)
00357         return output
00358 
00359 class LineSkipper(FilePreprocessor):
00360     def __init__(self, strings = [], regexps = []):
00361         import re
00362         self.strings = strings
00363         self.regexps = map(re.compile,regexps)
00364 
00365     def __processLine__(self, line):
00366         for s in self.strings:
00367             if line.find(s) >= 0: return None
00368         for r in self.regexps:
00369             if r.search(line): return None
00370         return line
00371 
00372 class BlockSkipper(FilePreprocessor):
00373     def __init__(self, start, end):
00374         self.start = start
00375         self.end = end
00376         self._skipping = False
00377 
00378     def __processLine__(self, line):
00379         if self.start in line:
00380             self._skipping = True
00381             return None
00382         elif self.end in line:
00383             self._skipping = False
00384         elif self._skipping:
00385             return None
00386         return line
00387 
00388 class RegexpReplacer(FilePreprocessor):
00389     def __init__(self, orig, repl = "", when = None):
00390         if when:
00391             when = re.compile(when)
00392         self._operations = [ (when, re.compile(orig), repl) ]
00393     def __add__(self,rhs):
00394         if isinstance(rhs, RegexpReplacer):
00395             res = RegexpReplacer("","",None)
00396             res._operations = self._operations + rhs._operations
00397         else:
00398             res = FilePreprocessor.__add__(self, rhs)
00399         return res
00400     def __processLine__(self, line):
00401         for w,o,r in self._operations:
00402             if w is None or w.search(line):
00403                 line = o.sub(r, line)
00404         return line
00405 
00406 # Common preprocessors
00407 maskPointers  = RegexpReplacer("0x[0-9a-fA-F]{4,16}","0x########")
00408 normalizeDate = RegexpReplacer("[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9] *(CES?T)?",
00409                                "00:00:00 1970-01-01")
00410 normalizeEOL = FilePreprocessor()
00411 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
00412 
00413 skipEmptyLines = FilePreprocessor()
00414 # FIXME: that's ugly
00415 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
00416 
00417 ## Special preprocessor sorting the list of strings (whitespace separated)
00418 #  that follow a signature on a single line
00419 class LineSorter(FilePreprocessor):
00420     def __init__(self, signature):
00421         self.signature = signature
00422         self.siglen = len(signature)
00423     def __processLine__(self, line):
00424         pos = line.find(self.signature)
00425         if pos >=0:
00426             line = line[:(pos+self.siglen)]
00427             lst = line[(pos+self.siglen):].split()
00428             lst.sort()
00429             line += " ".join(lst)
00430         return line
00431 
00432 # Preprocessors for GaudiExamples
00433 normalizeExamples = maskPointers + normalizeDate
00434 for w,o,r in [
00435               #("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
00436               ("TIMER.TIMER",r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
00437               ("release all pending",r"^.*/([^/]*:.*)",r"\1"),
00438               ("0x########",r"\[.*/([^/]*.*)\]",r"[\1]"),
00439               ("^#.*file",r"file '.*[/\\]([^/\\]*)$",r"file '\1"),
00440               ("^JobOptionsSvc.*options successfully read in from",r"read in from .*[/\\]([^/\\]*)$",r"file \1"), # normalize path to options
00441               # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
00442               (None,r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}","00000000-0000-0000-0000-000000000000"),
00443               # Absorb a change in ServiceLocatorHelper
00444               ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service", "ServiceLocatorHelper::service"),
00445               # Remove the leading 0 in Windows' exponential format
00446               (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
00447               ]: #[ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
00448     normalizeExamples += RegexpReplacer(o,r,w)
00449 normalizeExamples = LineSkipper(["//GP:",
00450                                  "Time User",
00451                                  "Welcome to",
00452                                  "This machine has a speed",
00453                                  "TIME:",
00454                                  "running on",
00455                                  "ToolSvc.Sequenc...   INFO",
00456                                  "DataListenerSvc      INFO XML written to file:",
00457                                  "[INFO]","[WARNING]",
00458                                  "DEBUG No writable file catalog found which contains FID:",
00459                                  "0 local", # hack for ErrorLogExample
00460                                  "DEBUG Service base class initialized successfully", # changed between v20 and v21
00461                                  "DEBUG Incident  timing:", # introduced with patch #3487
00462                                  # This comes from ROOT, when using GaudiPython
00463                                  'Note: (file "(tmpfile)", line 2) File "set" already loaded',
00464                                  # The signal handler complains about SIGXCPU not defined on some platforms
00465                                  'SIGXCPU',
00466                                  ],regexps = [
00467                                  r"^#", # Ignore python comments
00468                                  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:", # skip the message reporting the version of the root file
00469                                  r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[", # hack for ErrorLogExample
00470                                  r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[", # hack for ErrorLogExample
00471                                  r"File '.*.xml' does not exist",
00472                                  r"INFO Refer to dataset .* by its file ID:",
00473                                  r"INFO Referring to dataset .* by its file ID:",
00474                                  r"INFO Disconnect from dataset",
00475                                  r"INFO Disconnected from dataset",
00476                                  r"INFO Disconnected data IO:",
00477                                  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
00478                                  # I want to ignore the header of the unchecked StatusCode report
00479                                  r"^StatusCodeSvc.*listing all unchecked return codes:",
00480                                  r"^StatusCodeSvc\s*INFO\s*$",
00481                                  r"Num\s*\|\s*Function\s*\|\s*Source Library",
00482                                  r"^[-+]*\s*$",
00483                                  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
00484                                  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
00485                                  # Hide unckeched StatusCodes  from dictionaries
00486                                  r"^ +[0-9]+ \|.*ROOT",
00487                                  r"^ +[0-9]+ \|.*\|.*Dict",
00488                                  # Remove ROOT TTree summary table, which changes from one version to the other
00489                                  r"^\*.*\*$",
00490                                  # Remove Histos Summaries
00491                                  r"SUCCESS\s*Booked \d+ Histogram\(s\)",
00492                                  r"^ \|",
00493                                  r"^ ID=",
00494                                  ] ) + normalizeExamples + skipEmptyLines + \
00495                                   normalizeEOL + \
00496                                   LineSorter("Services to release : ")
00497 
00498 class ReferenceFileValidator:
00499     def __init__(self, reffile, cause, result_key, preproc = normalizeExamples):
00500         self.reffile = os.path.expandvars(reffile)
00501         self.cause = cause
00502         self.result_key = result_key
00503         self.preproc = preproc
00504     def __call__(self, stdout, result):
00505         causes = []
00506         if os.path.isfile(self.reffile):
00507             orig = open(self.reffile).xreadlines()
00508             if self.preproc:
00509                 orig = self.preproc(orig)
00510         else:
00511             orig = []
00512 
00513         new = stdout.splitlines()
00514         if self.preproc:
00515             new = self.preproc(new)
00516         #open(self.reffile + ".test","w").writelines(new)
00517         diffs = difflib.ndiff(orig,new,charjunk=difflib.IS_CHARACTER_JUNK)
00518         filterdiffs = map(lambda x: x.strip(),filter(lambda x: x[0] != " ",diffs))
00519         #filterdiffs = [x.strip() for x in diffs]
00520         if filterdiffs:
00521             result[self.result_key] = result.Quote("\n".join(filterdiffs))
00522             result[self.result_key] += result.Quote("""
00523 Legend:
00524         -) reference file
00525         +) standard output of the test""")
00526             causes.append(self.cause)
00527 
00528         return causes
00529 
00530 ########################################################################
00531 # Useful validation functions
00532 ########################################################################
00533 def findReferenceBlock(reference, stdout, result, causes, signature_offset=0, signature=None,
00534                        id = None):
00535     """
00536     Given a block of text, tries to find it in the output.
00537     The block had to be identified by a signature line. By default, the first
00538     line is used as signature, or the line pointed to by signature_offset. If
00539     signature_offset points outside the block, a signature line can be passed as
00540     signature argument. Note: if 'signature' is None (the default), a negative
00541     signature_offset is interpreted as index in a list (e.g. -1 means the last
00542     line), otherwise the it is interpreted as the number of lines before the
00543     first one of the block the signature must appear.
00544     The parameter 'id' allow to distinguish between different calls to this
00545     function in the same validation code.
00546     """
00547     # split reference file, sanitize EOLs and remove empty lines
00548     reflines = filter(None,map(lambda s: s.rstrip(), reference.splitlines()))
00549     if not reflines:
00550         raise RuntimeError("Empty (or null) reference")
00551     # the same on standard output
00552     outlines = filter(None,map(lambda s: s.rstrip(), stdout.splitlines()))
00553 
00554     res_field = "GaudiTest.RefBlock"
00555     if id:
00556         res_field += "_%s" % id
00557 
00558     if signature is None:
00559         if signature_offset < 0:
00560             signature_offset = len(reference)+signature_offset
00561         signature = reflines[signature_offset]
00562     # find the reference block in the output file
00563     try:
00564         pos = outlines.index(signature)
00565         outlines = outlines[pos-signature_offset:pos+len(reflines)-signature_offset]
00566         if reflines != outlines:
00567             msg = "standard output"
00568             # I do not want 2 messages in causes if teh function is called twice
00569             if not msg in causes:
00570                 causes.append(msg)
00571             result[res_field + ".observed"] = result.Quote("\n".join(outlines))
00572     except ValueError:
00573         causes.append("missing signature")
00574     result[res_field + ".signature"] = result.Quote(signature)
00575     if len(reflines) > 1 or signature != reflines[0]:
00576         result[res_field + ".expected"] = result.Quote("\n".join(reflines))
00577 
00578     return causes
00579 
00580 def countErrorLines(expected = {'ERROR':0, 'FATAL':0}, **kwargs):
00581     """
00582     Count the number of messages with required severity (by default ERROR and FATAL)
00583     and check if their numbers match the expected ones (0 by default).
00584     The dictionary "expected" can be used to tune the number of errors and fatals
00585     allowed, or to limit the number of expected warnings etc.
00586     """
00587     stdout = kwargs["stdout"]
00588     result = kwargs["result"]
00589     causes = kwargs["causes"]
00590 
00591     # prepare the dictionary to record the extracted lines
00592     errors = {}
00593     for sev in expected:
00594         errors[sev] = []
00595 
00596     outlines = stdout.splitlines()
00597     from math import log10
00598     fmt = "%%%dd - %%s" % (int(log10(len(outlines))+1))
00599 
00600     linecount = 0
00601     for l in outlines:
00602         linecount += 1
00603         words = l.split()
00604         if len(words) >= 2 and words[1] in errors:
00605             errors[words[1]].append(fmt%(linecount,l.rstrip()))
00606 
00607     for e in errors:
00608         if len(errors[e]) != expected[e]:
00609             causes.append('%s(%d)'%(e,len(errors[e])))
00610             result["GaudiTest.lines.%s"%e] = result.Quote('\n'.join(errors[e]))
00611             result["GaudiTest.lines.%s.expected#"%e] = result.Quote(str(expected[e]))
00612 
00613     return causes
00614 
00615 
00616 def _parseTTreeSummary(lines, pos):
00617     """
00618     Parse the TTree summary table in lines, starting from pos.
00619     Returns a tuple with the dictionary with the digested informations and the
00620     position of the first line after the summary.
00621     """
00622     result = {}
00623     i = pos + 1 # first line is a sequence of '*'
00624     count = len(lines)
00625 
00626     splitcols = lambda l: [ f.strip() for f in l.strip("*\n").split(':',2) ]
00627     def parseblock(ll):
00628         r = {}
00629         cols = splitcols(ll[0])
00630         r["Name"], r["Title"] = cols[1:]
00631 
00632         cols = splitcols(ll[1])
00633         r["Entries"] = int(cols[1])
00634 
00635         sizes = cols[2].split()
00636         r["Total size"] = int(sizes[2])
00637         if sizes[-1] == "memory":
00638             r["File size"] = 0
00639         else:
00640             r["File size"] = int(sizes[-1])
00641 
00642         cols = splitcols(ll[2])
00643         sizes = cols[2].split()
00644         if cols[0] == "Baskets":
00645             r["Baskets"] = int(cols[1])
00646             r["Basket size"] = int(sizes[2])
00647         r["Compression"] = float(sizes[-1])
00648         return r
00649 
00650     if i < (count - 3) and lines[i].startswith("*Tree"):
00651         result = parseblock(lines[i:i+3])
00652         result["Branches"] = {}
00653         i += 4
00654         while i < (count - 3) and lines[i].startswith("*Br"):
00655             branch = parseblock(lines[i:i+3])
00656             result["Branches"][branch["Name"]] = branch
00657             i += 4
00658 
00659     return (result, i)
00660 
00661 def findTTreeSummaries(stdout):
00662     """
00663     Scan stdout to find ROOT TTree summaries and digest them.
00664     """
00665     stars = re.compile(r"^\*+$")
00666     outlines = stdout.splitlines()
00667     nlines = len(outlines)
00668     trees = {}
00669 
00670     i = 0
00671     while i < nlines: #loop over the output
00672         # look for
00673         while i < nlines and not stars.match(outlines[i]):
00674             i += 1
00675         if i < nlines:
00676             tree, i = _parseTTreeSummary(outlines, i)
00677             if tree:
00678                 trees[tree["Name"]] = tree
00679 
00680     return trees
00681 
00682 def cmpTreesDicts(reference, to_check, ignore = None):
00683     """
00684     Check that all the keys in reference are in to_check too, with the same value.
00685     If the value is a dict, the function is called recursively. to_check can
00686     contain more keys than reference, that will not be tested.
00687     The function returns at the first difference found.
00688     """
00689     fail_keys = []
00690     # filter the keys in the reference dictionary
00691     if ignore:
00692         ignore_re = re.compile(ignore)
00693         keys = [ key for key in reference if not ignore_re.match(key) ]
00694     else:
00695         keys = reference.keys()
00696     # loop over the keys (not ignored) in the reference dictionary
00697     for k in keys:
00698         if k in to_check: # the key must be in the dictionary to_check
00699             if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
00700                 # if both reference and to_check values are dictionaries, recurse
00701                 failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
00702             else:
00703                 # compare the two values
00704                 failed = to_check[k] != reference[k]
00705         else: # handle missing keys in the dictionary to check (i.e. failure)
00706             to_check[k] = None
00707             failed = True
00708         if failed:
00709             fail_keys.insert(0, k)
00710             break # exit from the loop at the first failure
00711     return fail_keys # return the list of keys bringing to the different values
00712 
00713 def getCmpFailingValues(reference, to_check, fail_path):
00714     c = to_check
00715     r = reference
00716     for k in fail_path:
00717         c = c.get(k,None)
00718         r = r.get(k,None)
00719         if c is None or r is None:
00720             break # one of the dictionaries is not deep enough
00721     return (fail_path, r, c)
00722 
00723 # signature of the print-out of the histograms
00724 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+(.*)")
00725 
00726 def parseHistosSummary(lines, pos):
00727     """
00728     Extract the histograms infos from the lines starting at pos.
00729     Returns the position of the first line after the summary block.
00730     """
00731     global h_count_re
00732     h_table_head = re.compile(r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
00733     h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
00734 
00735     nlines = len(lines)
00736 
00737     # decode header
00738     m = h_count_re.search(lines[pos])
00739     name = m.group(1).strip()
00740     total = int(m.group(2))
00741     header = {}
00742     for k, v in [ x.split("=") for x in  m.group(3).split() ]:
00743         header[k] = int(v)
00744     pos += 1
00745     header["Total"] = total
00746 
00747     summ = {}
00748     while pos < nlines:
00749         m = h_table_head.search(lines[pos])
00750         if m:
00751             t, d = m.groups(1) # type and directory
00752             t = t.replace(" profile", "Prof")
00753             pos += 1
00754             if pos < nlines:
00755                 l = lines[pos]
00756             else:
00757                 l = ""
00758             cont = {}
00759             if l.startswith(" | ID"):
00760                 # table format
00761                 titles = [ x.strip() for x in l.split("|")][1:]
00762                 pos += 1
00763                 while pos < nlines and lines[pos].startswith(" |"):
00764                     l = lines[pos]
00765                     values = [ x.strip() for x in l.split("|")][1:]
00766                     hcont = {}
00767                     for i in range(len(titles)):
00768                         hcont[titles[i]] = values[i]
00769                     cont[hcont["ID"]] = hcont
00770                     pos += 1
00771             elif l.startswith(" ID="):
00772                 while pos < nlines and lines[pos].startswith(" ID="):
00773                     values = [ x.strip() for x in  h_short_summ.search(lines[pos]).groups() ]
00774                     cont[values[0]] = values
00775                     pos += 1
00776             else: # not interpreted
00777                 raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
00778             if not d in summ:
00779                 summ[d] = {}
00780             summ[d][t] = cont
00781             summ[d]["header"] = header
00782         else:
00783             break
00784     if not summ:
00785         # If the full table is not present, we use only the header
00786         summ[name] = {"header": header}
00787     return summ, pos
00788 
00789 def findHistosSummaries(stdout):
00790     """
00791     Scan stdout to find ROOT TTree summaries and digest them.
00792     """
00793     outlines = stdout.splitlines()
00794     nlines = len(outlines) - 1
00795     summaries = {}
00796     global h_count_re
00797 
00798     pos = 0
00799     while pos < nlines:
00800         summ = {}
00801         # find first line of block:
00802         match = h_count_re.search(outlines[pos])
00803         while pos < nlines and not match:
00804             pos += 1
00805             match = h_count_re.search(outlines[pos])
00806         if match:
00807             summ, pos = parseHistosSummary(outlines, pos)
00808         summaries.update(summ)
00809     return summaries
00810 
00811 class GaudiFilterExecutable(qm.executable.Filter):
00812     def __init__(self, input, timeout = -1):
00813         """Create a new 'Filter'.
00814 
00815         'input' -- The string containing the input to provide to the
00816         child process.
00817 
00818         'timeout' -- As for 'TimeoutExecutable.__init__'."""
00819 
00820         super(GaudiFilterExecutable, self).__init__(input, timeout)
00821         self.__input = input
00822         self.__timeout = timeout
00823         self.stack_trace_file = None
00824         # Temporary file to pass the stack trace from one process to the other
00825         # The file must be closed and reopened when needed to avoid conflicts
00826         # between the processes
00827         tmpf = tempfile.mkstemp()
00828         os.close(tmpf[0])
00829         self.stack_trace_file = tmpf[1] # remember only the name
00830 
00831     def __UseSeparateProcessGroupForChild(self):
00832         """Copied from TimeoutExecutable to allow the re-implementation of
00833            _HandleChild.
00834         """
00835         if sys.platform == "win32":
00836             # In Windows 2000 (or later), we should use "jobs" by
00837             # analogy with UNIX process groups.  However, that
00838             # functionality is not (yet) provided by the Python Win32
00839             # extensions.
00840             return 0
00841 
00842         return self.__timeout >= 0 or self.__timeout == -2
00843     ##
00844     # Needs to replace the ones from RedirectedExecutable and TimeoutExecutable
00845     def _HandleChild(self):
00846         """Code copied from both FilterExecutable and TimeoutExecutable.
00847         """
00848         # Close the pipe ends that we do not need.
00849         if self._stdin_pipe:
00850             self._ClosePipeEnd(self._stdin_pipe[0])
00851         if self._stdout_pipe:
00852             self._ClosePipeEnd(self._stdout_pipe[1])
00853         if self._stderr_pipe:
00854             self._ClosePipeEnd(self._stderr_pipe[1])
00855 
00856         # The pipes created by 'RedirectedExecutable' must be closed
00857         # before the monitor process (created by 'TimeoutExecutable')
00858         # is created.  Otherwise, if the child process dies, 'select'
00859         # in the parent will not return if the monitor process may
00860         # still have one of the file descriptors open.
00861 
00862         super(qm.executable.TimeoutExecutable, self)._HandleChild()
00863 
00864         if self.__UseSeparateProcessGroupForChild():
00865             # Put the child into its own process group.  This step is
00866             # performed in both the parent and the child; therefore both
00867             # processes can safely assume that the creation of the process
00868             # group has taken place.
00869             child_pid = self._GetChildPID()
00870             try:
00871                 os.setpgid(child_pid, child_pid)
00872             except:
00873                 # The call to setpgid may fail if the child has exited,
00874                 # or has already called 'exec'.  In that case, we are
00875                 # guaranteed that the child has already put itself in the
00876                 # desired process group.
00877                 pass
00878             # Create the monitoring process.
00879             #
00880             # If the monitoring process is in parent's process group and
00881             # kills the child after waitpid has returned in the parent, we
00882             # may end up trying to kill a process group other than the one
00883             # that we intend to kill.  Therefore, we put the monitoring
00884             # process in the same process group as the child; that ensures
00885             # that the process group will persist until the monitoring
00886             # process kills it.
00887             self.__monitor_pid = os.fork()
00888             if self.__monitor_pid != 0:
00889                 # Make sure that the monitoring process is placed into the
00890                 # child's process group before the parent process calls
00891                 # 'waitpid'.  In this way, we are guaranteed that the process
00892                 # group as the child
00893                 os.setpgid(self.__monitor_pid, child_pid)
00894             else:
00895                 # Put the monitoring process into the child's process
00896                 # group.  We know the process group still exists at
00897                 # this point because either (a) we are in the process
00898                 # group, or (b) the parent has not yet called waitpid.
00899                 os.setpgid(0, child_pid)
00900 
00901                 # Close all open file descriptors.  They are not needed
00902                 # in the monitor process.  Furthermore, when the parent
00903                 # closes the write end of the stdin pipe to the child,
00904                 # we do not want the pipe to remain open; leaving the
00905                 # pipe open in the monitor process might cause the child
00906                 # to block waiting for additional input.
00907                 try:
00908                     max_fds = os.sysconf("SC_OPEN_MAX")
00909                 except:
00910                     max_fds = 256
00911                 for fd in xrange(max_fds):
00912                     try:
00913                         os.close(fd)
00914                     except:
00915                         pass
00916                 try:
00917                     if self.__timeout >= 0:
00918                         # Give the child time to run.
00919                         time.sleep (self.__timeout)
00920                         #######################################################
00921                         ### This is the interesting part: dump the stack trace to a file
00922                         if sys.platform == "linux2": # we should be have /proc and gdb
00923                             cmd = ["gdb",
00924                                    os.path.join("/proc", str(child_pid), "exe"),
00925                                    str(child_pid),
00926                                    "-batch", "-n", "-x",
00927                                    "'%s'" % os.path.join(os.path.dirname(__file__), "stack-trace.gdb")]
00928                             # FIXME: I wanted to use subprocess.Popen, but it doesn't want to work
00929                             #        in this context.
00930                             o = os.popen(" ".join(cmd)).read()
00931                             open(self.stack_trace_file,"w").write(o)
00932                         #######################################################
00933 
00934                         # Kill all processes in the child process group.
00935                         os.kill(0, signal.SIGKILL)
00936                     else:
00937                         # This call to select will never terminate.
00938                         select.select ([], [], [])
00939                 finally:
00940                     # Exit.  This code is in a finally clause so that
00941                     # we are guaranteed to get here no matter what.
00942                     os._exit(0)
00943         elif self.__timeout >= 0 and sys.platform == "win32":
00944             # Create a monitoring thread.
00945             self.__monitor_thread = Thread(target = self.__Monitor)
00946             self.__monitor_thread.start()
00947 
00948     if sys.platform == "win32":
00949 
00950         def __Monitor(self):
00951             """Code copied from FilterExecutable.
00952             Kill the child if the timeout expires.
00953 
00954             This function is run in the monitoring thread."""
00955 
00956             # The timeout may be expressed as a floating-point value
00957             # on UNIX, but it must be an integer number of
00958             # milliseconds when passed to WaitForSingleObject.
00959             timeout = int(self.__timeout * 1000)
00960             # Wait for the child process to terminate or for the
00961             # timer to expire.
00962             result = win32event.WaitForSingleObject(self._GetChildPID(),
00963                                                     timeout)
00964             # If the timeout occurred, kill the child process.
00965             if result == win32con.WAIT_TIMEOUT:
00966                 self.Kill()
00967 
00968 ########################################################################
00969 # Test Classes
00970 ########################################################################
00971 class GaudiExeTest(ExecTestBase):
00972     """Standard Gaudi test.
00973     """
00974     arguments = [
00975         qm.fields.TextField(
00976             name="program",
00977             title="Program",
00978             not_empty_text=1,
00979             description="""The path to the program.
00980 
00981             This field indicates the path to the program.  If it is not
00982             an absolute path, the value of the 'PATH' environment
00983             variable will be used to search for the program.
00984             If not specified, $GAUDIEXE or Gaudi.exe are used.
00985             """
00986             ),
00987         qm.fields.SetField(qm.fields.TextField(
00988             name="args",
00989             title="Argument List",
00990             description="""The command-line arguments.
00991 
00992             If this field is left blank, the program is run without any
00993             arguments.
00994 
00995             Use this field to specify the option files.
00996 
00997             An implicit 0th argument (the path to the program) is added
00998             automatically."""
00999             )),
01000         qm.fields.TextField(
01001             name="options",
01002             title="Options",
01003             description="""Options to be passed to the application.
01004 
01005             This field allows to pass a list of options to the main program
01006             without the need of a separate option file.
01007 
01008             The content of the field is written to a temporary file which name
01009             is passed the the application as last argument (appended to the
01010             field "Argument List".
01011             """,
01012             verbatim="true",
01013             multiline="true",
01014             default_value=""
01015             ),
01016         qm.fields.TextField(
01017             name="workdir",
01018             title="Working Directory",
01019             description="""Path to the working directory.
01020 
01021             If this field is left blank, the program will be run from the qmtest
01022             directory, otherwise from the directory specified.""",
01023             default_value=""
01024             ),
01025         qm.fields.TextField(
01026             name="reference",
01027             title="Reference Output",
01028             description="""Path to the file containing the reference output.
01029 
01030             If this field is left blank, any standard output will be considered
01031             valid.
01032 
01033             If the reference file is specified, any output on standard error is
01034             ignored."""
01035             ),
01036         qm.fields.TextField(
01037             name="error_reference",
01038             title="Reference for standard error",
01039             description="""Path to the file containing the reference for the standard error.
01040 
01041             If this field is left blank, any standard output will be considered
01042             valid.
01043 
01044             If the reference file is specified, any output on standard error is
01045             ignored."""
01046             ),
01047         qm.fields.SetField(qm.fields.TextField(
01048             name = "unsupported_platforms",
01049             title = "Unsupported Platforms",
01050             description = """Platform on which the test must not be run.
01051 
01052             List of regular expressions identifying the platforms on which the
01053             test is not run and the result is set to UNTESTED."""
01054             )),
01055 
01056         qm.fields.TextField(
01057             name = "validator",
01058             title = "Validator",
01059             description = """Function to validate the output of the test.
01060 
01061             If defined, the function is used to validate the products of the
01062             test.
01063             The function is called passing as arguments:
01064               self:   the test class instance
01065               stdout: the standard output of the executed test
01066               stderr: the standard error of the executed test
01067               result: the Result objects to fill with messages
01068             The function must return a list of causes for the failure.
01069             If specified, overrides standard output, standard error and
01070             reference files.
01071             """,
01072             verbatim="true",
01073             multiline="true",
01074             default_value=""
01075             ),
01076 
01077         qm.fields.BooleanField(
01078             name = "use_temp_dir",
01079             title = "Use temporary directory",
01080             description = """Use temporary directory.
01081 
01082             If set to true, use a temporary directory as working directory.
01083             """,
01084             default_value="false"
01085             ),
01086         ]
01087 
01088     def PlatformIsNotSupported(self, context, result):
01089         platform = self.GetPlatform()
01090         unsupported = [ re.compile(x)
01091                         for x in [ str(y).strip()
01092                                    for y in self.unsupported_platforms ]
01093                         if x
01094                        ]
01095         for p_re in unsupported:
01096             if p_re.search(platform):
01097                 result.SetOutcome(result.UNTESTED)
01098                 result[result.CAUSE] = 'Platform not supported.'
01099                 return True
01100         return False
01101 
01102     def GetPlatform(self):
01103         """
01104         Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
01105         """
01106         arch = "None"
01107         # check architecture name
01108         if "CMTCONFIG" in os.environ:
01109             arch = os.environ["CMTCONFIG"]
01110         elif "SCRAM_ARCH" in os.environ:
01111             arch = os.environ["SCRAM_ARCH"]
01112         return arch
01113 
01114     def _expandReferenceFileName(self, reffile):
01115         # if no file is passed, do nothing
01116         if not reffile:
01117             return ""
01118 
01119         # function to split an extension in constituents parts
01120         platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))
01121 
01122         reference = os.path.normpath(os.path.expandvars(reffile))
01123         # old-style platform-specific reference name
01124         spec_ref = reference[:-3] + self.GetPlatform()[0:3] + reference[-3:]
01125         if os.path.isfile(spec_ref):
01126             reference = spec_ref
01127         else: # look for new-style platform specific reference files:
01128             # get all the files whose name start with the reference filename
01129             dirname, basename = os.path.split(reference)
01130             if not dirname: dirname = '.'
01131             head = basename + "."
01132             head_len = len(head)
01133             platform = platformSplit(self.GetPlatform())
01134             candidates = []
01135             for f in os.listdir(dirname):
01136                 if f.startswith(head):
01137                     req_plat = platformSplit(f[head_len:])
01138                     if platform.issuperset(req_plat):
01139                         candidates.append( (len(req_plat), f) )
01140             if candidates: # take the one with highest matching
01141                 # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
01142                 #        has to use ref.x86_64-gcc43 or ref.slc5-dbg
01143                 candidates.sort()
01144                 reference = os.path.join(dirname, candidates[-1][1])
01145         return reference
01146 
01147     def CheckTTreesSummaries(self, stdout, result, causes,
01148                              trees_dict = None,
01149                              ignore = r"Basket|.*size|Compression"):
01150         """
01151         Compare the TTree summaries in stdout with the ones in trees_dict or in
01152         the reference file. By default ignore the size, compression and basket
01153         fields.
01154         The presence of TTree summaries when none is expected is not a failure.
01155         """
01156         if trees_dict is None:
01157             reference = self._expandReferenceFileName(self.reference)
01158             # call the validator if the file exists
01159             if reference and os.path.isfile(reference):
01160                 trees_dict = findTTreeSummaries(open(reference).read())
01161             else:
01162                 trees_dict = {}
01163 
01164         from pprint import PrettyPrinter
01165         pp = PrettyPrinter()
01166         if trees_dict:
01167             result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
01168             if ignore:
01169                 result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
01170 
01171         trees = findTTreeSummaries(stdout)
01172         failed = cmpTreesDicts(trees_dict, trees, ignore)
01173         if failed:
01174             causes.append("trees summaries")
01175             msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
01176             result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
01177             result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
01178 
01179         return causes
01180 
01181     def CheckHistosSummaries(self, stdout, result, causes,
01182                              dict = None,
01183                              ignore = None):
01184         """
01185         Compare the TTree summaries in stdout with the ones in trees_dict or in
01186         the reference file. By default ignore the size, compression and basket
01187         fields.
01188         The presence of TTree summaries when none is expected is not a failure.
01189         """
01190         if dict is None:
01191             reference = self._expandReferenceFileName(self.reference)
01192             # call the validator if the file exists
01193             if reference and os.path.isfile(reference):
01194                 dict = findHistosSummaries(open(reference).read())
01195             else:
01196                 dict = {}
01197 
01198         from pprint import PrettyPrinter
01199         pp = PrettyPrinter()
01200         if dict:
01201             result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
01202             if ignore:
01203                 result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
01204 
01205         histos = findHistosSummaries(stdout)
01206         failed = cmpTreesDicts(dict, histos, ignore)
01207         if failed:
01208             causes.append("histos summaries")
01209             msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
01210             result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
01211             result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
01212 
01213         return causes
01214 
01215     def ValidateWithReference(self, stdout, stderr, result, causes, preproc = None):
01216         """
01217         Default validation action: compare standard output and error to the
01218         reference files.
01219         """
01220         # set the default output preprocessor
01221         if preproc is None:
01222             preproc = normalizeExamples
01223         # check standard output
01224         reference = self._expandReferenceFileName(self.reference)
01225         # call the validator if the file exists
01226         if reference and os.path.isfile(reference):
01227             result["GaudiTest.output_reference"] = reference
01228             causes += ReferenceFileValidator(reference,
01229                                              "standard output",
01230                                              "GaudiTest.output_diff",
01231                                              preproc = preproc)(stdout, result)
01232 
01233         # Compare TTree summaries
01234         causes = self.CheckTTreesSummaries(stdout, result, causes)
01235         causes = self.CheckHistosSummaries(stdout, result, causes)
01236 
01237         if causes: # Write a new reference file for stdout
01238             try:
01239                 newref = open(reference + ".new","w")
01240                 # sanitize newlines
01241                 for l in stdout.splitlines():
01242                     newref.write(l.rstrip() + '\n')
01243                 del newref # flush and close
01244             except IOError:
01245                 # Ignore IO errors when trying to update reference files
01246                 # because we may be in a read-only filesystem
01247                 pass
01248 
01249         # check standard error
01250         reference = self._expandReferenceFileName(self.error_reference)
01251         # call the validator if we have a file to use
01252         if reference and os.path.isfile(reference):
01253             result["GaudiTest.error_reference"] = reference
01254             newcauses = ReferenceFileValidator(reference,
01255                                                "standard error",
01256                                                "GaudiTest.error_diff",
01257                                                preproc = preproc)(stderr, result)
01258             causes += newcauses
01259             if newcauses: # Write a new reference file for stdedd
01260                 newref = open(reference + ".new","w")
01261                 # sanitize newlines
01262                 for l in stderr.splitlines():
01263                     newref.write(l.rstrip() + '\n')
01264                 del newref # flush and close
01265         else:
01266             causes += BasicOutputValidator(self.stderr,
01267                                            "standard error",
01268                                            "ExecTest.expected_stderr")(stderr, result)
01269 
01270         return causes
01271 
01272     def ValidateOutput(self, stdout, stderr, result):
01273         causes = []
01274         # if the test definition contains a custom validator, use it
01275         if self.validator.strip() != "":
01276             class CallWrapper(object):
01277                 """
01278                 Small wrapper class to dynamically bind some default arguments
01279                 to a callable.
01280                 """
01281                 def __init__(self, callable, extra_args = {}):
01282                     self.callable = callable
01283                     self.extra_args = extra_args
01284                     # get the list of names of positional arguments
01285                     from inspect import getargspec
01286                     self.args_order = getargspec(callable)[0]
01287                     # Remove "self" from the list of positional arguments
01288                     # since it is added automatically
01289                     if self.args_order[0] == "self":
01290                         del self.args_order[0]
01291                 def __call__(self, *args, **kwargs):
01292                     # Check which positional arguments are used
01293                     positional = self.args_order[:len(args)]
01294 
01295                     kwargs = dict(kwargs) # copy the arguments dictionary
01296                     for a in self.extra_args:
01297                         # use "extra_args" for the arguments not specified as
01298                         # positional or keyword
01299                         if a not in positional and a not in kwargs:
01300                             kwargs[a] = self.extra_args[a]
01301                     return apply(self.callable, args, kwargs)
01302             # local names to be exposed in the script
01303             exported_symbols = {"self":self,
01304                                 "stdout":stdout,
01305                                 "stderr":stderr,
01306                                 "result":result,
01307                                 "causes":causes,
01308                                 "findReferenceBlock":
01309                                     CallWrapper(findReferenceBlock, {"stdout":stdout,
01310                                                                      "result":result,
01311                                                                      "causes":causes}),
01312                                 "validateWithReference":
01313                                     CallWrapper(self.ValidateWithReference, {"stdout":stdout,
01314                                                                              "stderr":stderr,
01315                                                                              "result":result,
01316                                                                              "causes":causes}),
01317                                 "countErrorLines":
01318                                     CallWrapper(countErrorLines, {"stdout":stdout,
01319                                                                   "result":result,
01320                                                                   "causes":causes}),
01321                                 "checkTTreesSummaries":
01322                                     CallWrapper(self.CheckTTreesSummaries, {"stdout":stdout,
01323                                                                             "result":result,
01324                                                                             "causes":causes}),
01325                                 "checkHistosSummaries":
01326                                     CallWrapper(self.CheckHistosSummaries, {"stdout":stdout,
01327                                                                             "result":result,
01328                                                                             "causes":causes}),
01329 
01330                                 }
01331             exec self.validator in globals(), exported_symbols
01332         else:
01333             self.ValidateWithReference(stdout, stderr, result, causes)
01334 
01335         return causes
01336 
01337     def DumpEnvironment(self, result):
01338         """
01339         Add the content of the environment to the result object.
01340 
01341         Copied from the QMTest class of COOL.
01342         """
01343         vars = os.environ.keys()
01344         vars.sort()
01345         result['GaudiTest.environment'] = \
01346             result.Quote('\n'.join(["%s=%s"%(v,os.environ[v]) for v in vars]))
01347 
01348     def Run(self, context, result):
01349         """Run the test.
01350 
01351         'context' -- A 'Context' giving run-time parameters to the
01352         test.
01353 
01354         'result' -- A 'Result' object.  The outcome will be
01355         'Result.PASS' when this method is called.  The 'result' may be
01356         modified by this method to indicate outcomes other than
01357         'Result.PASS' or to add annotations."""
01358 
01359         # Check if the platform is supported
01360         if self.PlatformIsNotSupported(context, result):
01361             return
01362 
01363         # Prepare program name and arguments (expanding variables, and converting to absolute)
01364         if self.program:
01365             prog = rationalizepath(self.program)
01366         elif "GAUDIEXE" in os.environ:
01367             prog = os.environ["GAUDIEXE"]
01368         else:
01369             prog = "Gaudi.exe"
01370         self.program = prog
01371 
01372         dummy, prog_ext = os.path.splitext(prog)
01373         if prog_ext not in [ ".exe", ".py", ".bat" ] and self.GetPlatform()[0:3] == "win":
01374             prog += ".exe"
01375             prog_ext = ".exe"
01376 
01377         prog = which(prog) or prog
01378 
01379         # Convert paths to absolute paths in arguments and reference files
01380         args = map(rationalizepath, self.args)
01381         self.reference = rationalizepath(self.reference)
01382         self.error_reference = rationalizepath(self.error_reference)
01383 
01384 
01385         # check if the user provided inline options
01386         tmpfile = None
01387         if self.options.strip():
01388             ext = ".opts"
01389             if re.search(r"from\s*Gaudi.Configuration\s*import\s*\*", self.options):
01390                 ext = ".py"
01391             tmpfile = TempFile(ext)
01392             tmpfile.writelines("\n".join(self.options.splitlines()))
01393             tmpfile.flush()
01394             args.append(tmpfile.name)
01395             result["GaudiTest.options"] = result.Quote(self.options)
01396 
01397         # if the program is a python file, execute it through python
01398         if prog_ext == ".py":
01399             args.insert(0,prog)
01400             if self.GetPlatform()[0:3] == "win":
01401                 prog = which("python.exe") or "python.exe"
01402             else:
01403                 prog = which("python") or "python"
01404 
01405         # Change to the working directory if specified or to the default temporary
01406         origdir = os.getcwd()
01407         if self.workdir:
01408             os.chdir(str(os.path.normpath(os.path.expandvars(self.workdir))))
01409         elif self.use_temp_dir == "true":
01410             if "QMTEST_TMPDIR" in os.environ:
01411                 os.chdir(os.environ["QMTEST_TMPDIR"])
01412             elif "qmtest.tmpdir" in context:
01413                 os.chdir(context["qmtest.tmpdir"])
01414 
01415         if "QMTEST_IGNORE_TIMEOUT" not in os.environ:
01416             self.timeout = max(self.timeout,600)
01417         else:
01418             self.timeout = -1
01419 
01420         try:
01421             # Generate eclipse.org debug launcher for the test
01422             self._CreateEclipseLaunch(prog, args, destdir = origdir)
01423             # Run the test
01424             self.RunProgram(prog,
01425                             [ prog ] + args,
01426                             context, result)
01427             # Record the content of the enfironment for failing tests
01428             if result.GetOutcome() not in [ result.PASS ]:
01429                 self.DumpEnvironment(result)
01430         finally:
01431             # revert to the original directory
01432             os.chdir(origdir)
01433 
01434     def RunProgram(self, program, arguments, context, result):
01435         """Run the 'program'.
01436 
01437         'program' -- The path to the program to run.
01438 
01439         'arguments' -- A list of the arguments to the program.  This
01440         list must contain a first argument corresponding to 'argv[0]'.
01441 
01442         'context' -- A 'Context' giving run-time parameters to the
01443         test.
01444 
01445         'result' -- A 'Result' object.  The outcome will be
01446         'Result.PASS' when this method is called.  The 'result' may be
01447         modified by this method to indicate outcomes other than
01448         'Result.PASS' or to add annotations.
01449 
01450         @attention: This method has been copied from command.ExecTestBase
01451                     (QMTest 2.3.0) and modified to keep stdout and stderr
01452                     for tests that have been terminated by a signal.
01453                     (Fundamental for debugging in the Application Area)
01454         """
01455 
01456         # Construct the environment.
01457         environment = self.MakeEnvironment(context)
01458         # Create the executable.
01459         if self.timeout >= 0:
01460             timeout = self.timeout
01461         else:
01462             # If no timeout was specified, we sill run this process in a
01463             # separate process group and kill the entire process group
01464             # when the child is done executing.  That means that
01465             # orphaned child processes created by the test will be
01466             # cleaned up.
01467             timeout = -2
01468         e = GaudiFilterExecutable(self.stdin, timeout)
01469         # Run it.
01470         exit_status = e.Run(arguments, environment, path = program)
01471         # Get the stack trace from the temporary file (if present)
01472         if e.stack_trace_file and os.path.exists(e.stack_trace_file):
01473             stack_trace = open(e.stack_trace_file).read()
01474             os.remove(e.stack_trace_file)
01475         else:
01476             stack_trace = None
01477         if stack_trace:
01478             result["ExecTest.stack_trace"] = result.Quote(stack_trace)
01479 
01480         # If the process terminated normally, check the outputs.
01481         if sys.platform == "win32" or os.WIFEXITED(exit_status):
01482             # There are no causes of failure yet.
01483             causes = []
01484             # The target program terminated normally.  Extract the
01485             # exit code, if this test checks it.
01486             if self.exit_code is None:
01487                 exit_code = None
01488             elif sys.platform == "win32":
01489                 exit_code = exit_status
01490             else:
01491                 exit_code = os.WEXITSTATUS(exit_status)
01492             # Get the output generated by the program.
01493             stdout = e.stdout
01494             stderr = e.stderr
01495             # Record the results.
01496             result["ExecTest.exit_code"] = str(exit_code)
01497             result["ExecTest.stdout"] = result.Quote(stdout)
01498             result["ExecTest.stderr"] = result.Quote(stderr)
01499             # Check to see if the exit code matches.
01500             if exit_code != self.exit_code:
01501                 causes.append("exit_code")
01502                 result["ExecTest.expected_exit_code"] \
01503                     = str(self.exit_code)
01504             # Validate the output.
01505             causes += self.ValidateOutput(stdout, stderr, result)
01506             # If anything went wrong, the test failed.
01507             if causes:
01508                 result.Fail("Unexpected %s." % string.join(causes, ", "))
01509         elif os.WIFSIGNALED(exit_status):
01510             # The target program terminated with a signal.  Construe
01511             # that as a test failure.
01512             signal_number = str(os.WTERMSIG(exit_status))
01513             if not stack_trace:
01514                 result.Fail("Program terminated by signal.")
01515             else:
01516                 # The presence of stack_trace means tha we stopped the job because
01517                 # of a time-out
01518                 result.Fail("Exceeded time limit (%ds), terminated." % timeout)
01519             result["ExecTest.signal_number"] = signal_number
01520             result["ExecTest.stdout"] = result.Quote(e.stdout)
01521             result["ExecTest.stderr"] = result.Quote(e.stderr)
01522         elif os.WIFSTOPPED(exit_status):
01523             # The target program was stopped.  Construe that as a
01524             # test failure.
01525             signal_number = str(os.WSTOPSIG(exit_status))
01526             if not stack_trace:
01527                 result.Fail("Program stopped by signal.")
01528             else:
01529                 # The presence of stack_trace means tha we stopped the job because
01530                 # of a time-out
01531                 result.Fail("Exceeded time limit (%ds), stopped." % timeout)
01532             result["ExecTest.signal_number"] = signal_number
01533             result["ExecTest.stdout"] = result.Quote(e.stdout)
01534             result["ExecTest.stderr"] = result.Quote(e.stderr)
01535         else:
01536             # The target program terminated abnormally in some other
01537             # manner.  (This shouldn't normally happen...)
01538             result.Fail("Program did not terminate normally.")
01539 
01540         # Marco Cl.: This is a special trick to fix a "problem" with the output
01541         # of gaudi jobs when they use colors
01542         esc = '\x1b'
01543         repr_esc = '\\x1b'
01544         result["ExecTest.stdout"] = result["ExecTest.stdout"].replace(esc,repr_esc)
01545         # TODO: (MCl) improve the hack for colors in standard output
01546         #             may be converting them to HTML tags
01547 
01548     def _CreateEclipseLaunch(self, prog, args, destdir = None):
01549         # Find the project name used in ecplise.
01550         # The name is in a file called ".project" in one of the parent directories
01551         projbasedir = os.path.normpath(destdir)
01552         while not os.path.exists(os.path.join(projbasedir, ".project")):
01553             oldprojdir = projbasedir
01554             projbasedir = os.path.normpath(os.path.join(projbasedir, os.pardir))
01555             # FIXME: the root level is invariant when trying to go up one level,
01556             #        but it must be cheched on windows
01557             if oldprojdir == projbasedir:
01558                 # If we cannot find a .project, so no point in creating a .launch file
01559                 return
01560         # Use ElementTree to parse the XML file
01561         from xml.etree import ElementTree as ET
01562         t = ET.parse(os.path.join(projbasedir, ".project"))
01563         projectName = t.find("name").text
01564 
01565         # prepare the name/path of the generated file
01566         destfile = "%s.launch" % self._Runnable__id
01567         if destdir:
01568             destfile = os.path.join(destdir, destfile)
01569 
01570         if self.options.strip():
01571             # this means we have some custom options in the qmt file, so we have
01572             # to copy them from the temporary file at the end of the arguments
01573             # in another file
01574             tempfile = args.pop()
01575             optsfile = destfile + os.path.splitext(tempfile)[1]
01576             shutil.copyfile(tempfile, optsfile)
01577             args.append(optsfile)
01578 
01579         # prepare the data to insert in the XML file
01580         from xml.sax.saxutils import quoteattr # useful to quote XML special chars
01581         data = {}
01582         # Note: the "quoteattr(k)" is not needed because special chars cannot be part of a variable name,
01583         # but it doesn't harm.
01584         data["environment"] = "\n".join(['<mapEntry key=%s value=%s/>' % (quoteattr(k), quoteattr(v))
01585                                          for k, v in os.environ.iteritems()])
01586 
01587         data["exec"] = which(prog) or prog
01588         if os.path.basename(data["exec"]).lower().startswith("python"):
01589             data["stopAtMain"] = "false" # do not stop at main when debugging Python scripts
01590         else:
01591             data["stopAtMain"] = "true"
01592 
01593         data["args"] = "&#10;".join(map(rationalizepath, args))
01594 
01595         if not self.use_temp_dir:
01596             data["workdir"] = os.getcwd()
01597         else:
01598             # If the test is using a tmporary directory, it is better to run it
01599             # in the same directory as the .launch file when debugged in eclipse
01600             data["workdir"] = destdir
01601 
01602         data["project"] = projectName.strip()
01603 
01604         # Template for the XML file, based on eclipse 3.4
01605         xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
01606 <launchConfiguration type="org.eclipse.cdt.launch.applicationLaunchType">
01607 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB" value="true"/>
01608 <listAttribute key="org.eclipse.cdt.debug.mi.core.AUTO_SOLIB_LIST"/>
01609 <stringAttribute key="org.eclipse.cdt.debug.mi.core.DEBUG_NAME" value="gdb"/>
01610 <stringAttribute key="org.eclipse.cdt.debug.mi.core.GDB_INIT" value=".gdbinit"/>
01611 <listAttribute key="org.eclipse.cdt.debug.mi.core.SOLIB_PATH"/>
01612 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.STOP_ON_SOLIB_EVENTS" value="false"/>
01613 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.breakpointsFullPath" value="false"/>
01614 <stringAttribute key="org.eclipse.cdt.debug.mi.core.commandFactory" value="org.eclipse.cdt.debug.mi.core.standardCommandFactory"/>
01615 <stringAttribute key="org.eclipse.cdt.debug.mi.core.protocol" value="mi"/>
01616 <booleanAttribute key="org.eclipse.cdt.debug.mi.core.verboseMode" value="false"/>
01617 <intAttribute key="org.eclipse.cdt.launch.ATTR_BUILD_BEFORE_LAUNCH_ATTR" value="0"/>
01618 <stringAttribute key="org.eclipse.cdt.launch.COREFILE_PATH" value=""/>
01619 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_ID" value="org.eclipse.cdt.debug.mi.core.CDebuggerNew"/>
01620 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_REGISTER_GROUPS" value=""/>
01621 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_START_MODE" value="run"/>
01622 <booleanAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN" value="%(stopAtMain)s"/>
01623 <stringAttribute key="org.eclipse.cdt.launch.DEBUGGER_STOP_AT_MAIN_SYMBOL" value="main"/>
01624 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_REGISTER_BOOKKEEPING" value="false"/>
01625 <booleanAttribute key="org.eclipse.cdt.launch.ENABLE_VARIABLE_BOOKKEEPING" value="false"/>
01626 <stringAttribute key="org.eclipse.cdt.launch.FORMAT" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&lt;contentList/&gt;"/>
01627 <stringAttribute key="org.eclipse.cdt.launch.GLOBAL_VARIABLES" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;globalVariableList/&gt;&#10;"/>
01628 <stringAttribute key="org.eclipse.cdt.launch.MEMORY_BLOCKS" value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;memoryBlockExpressionList/&gt;&#10;"/>
01629 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_ARGUMENTS" value="%(args)s"/>
01630 <stringAttribute key="org.eclipse.cdt.launch.PROGRAM_NAME" value="%(exec)s"/>
01631 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_ATTR" value="%(project)s"/>
01632 <stringAttribute key="org.eclipse.cdt.launch.PROJECT_BUILD_CONFIG_ID_ATTR" value=""/>
01633 <stringAttribute key="org.eclipse.cdt.launch.WORKING_DIRECTORY" value="%(workdir)s"/>
01634 <booleanAttribute key="org.eclipse.cdt.launch.ui.ApplicationCDebuggerTab.DEFAULTS_SET" value="true"/>
01635 <booleanAttribute key="org.eclipse.cdt.launch.use_terminal" value="true"/>
01636 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
01637 <listEntry value="/%(project)s"/>
01638 </listAttribute>
01639 <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
01640 <listEntry value="4"/>
01641 </listAttribute>
01642 <booleanAttribute key="org.eclipse.debug.core.appendEnvironmentVariables" value="false"/>
01643 <mapAttribute key="org.eclipse.debug.core.environmentVariables">
01644 %(environment)s
01645 </mapAttribute>
01646 <mapAttribute key="org.eclipse.debug.core.preferred_launchers">
01647 <mapEntry key="[debug]" value="org.eclipse.cdt.cdi.launch.localCLaunch"/>
01648 </mapAttribute>
01649 <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
01650 <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
01651 </listAttribute>
01652 </launchConfiguration>
01653 """ % data
01654 
01655         # Write the output file
01656         open(destfile, "w").write(xml)
01657         #open(destfile + "_copy.xml", "w").write(xml)
01658 
01659 
01660 try:
01661     import json
01662 except ImportError:
01663     # Use simplejson for LCG
01664     import simplejson as json
01665 
01666 class HTMLResultStream(ResultStream):
01667     """An 'HTMLResultStream' writes its output to a set of HTML files.
01668 
01669     The argument 'dir' is used to select the destination directory for the HTML
01670     report.
01671     The destination directory may already contain the report from a previous run
01672     (for example of a different package), in which case it will be extended to
01673     include the new data.
01674     """
01675     arguments = [
01676         qm.fields.TextField(
01677             name = "dir",
01678             title = "Destination Directory",
01679             description = """The name of the directory.
01680 
01681             All results will be written to the directory indicated.""",
01682             verbatim = "true",
01683             default_value = ""),
01684     ]
01685 
01686     def __init__(self, arguments = None, **args):
01687         """Prepare the destination directory.
01688 
01689         Creates the destination directory and store in it some preliminary
01690         annotations and the static files found in the template directory
01691         'html_report'.
01692         """
01693         ResultStream.__init__(self, arguments, **args)
01694         self._summary = []
01695         self._summaryFile = os.path.join(self.dir, "summary.json")
01696         self._annotationsFile = os.path.join(self.dir, "annotations.json")
01697         # Prepare the destination directory using the template
01698         templateDir = os.path.join(os.path.dirname(__file__), "html_report")
01699         if not os.path.isdir(self.dir):
01700             os.makedirs(self.dir)
01701         # Copy the files in the template directory excluding the directories
01702         for f in os.listdir(templateDir):
01703             src = os.path.join(templateDir, f)
01704             dst = os.path.join(self.dir, f)
01705             if not os.path.isdir(src) and not os.path.exists(dst):
01706                 shutil.copy(src, dst)
01707         # Add some non-QMTest attributes
01708         if "CMTCONFIG" in os.environ:
01709             self.WriteAnnotation("cmt.cmtconfig", os.environ["CMTCONFIG"])
01710         import socket
01711         self.WriteAnnotation("hostname", socket.gethostname())
01712 
01713     def _updateSummary(self):
01714         """Helper function to extend the global summary file in the destination
01715         directory.
01716         """
01717         if os.path.exists(self._summaryFile):
01718             oldSummary = json.load(open(self._summaryFile))
01719         else:
01720             oldSummary = []
01721         ids = set([ i["id"] for i in self._summary ])
01722         newSummary = [ i for i in oldSummary if i["id"] not in ids ]
01723         newSummary.extend(self._summary)
01724         json.dump(newSummary, open(self._summaryFile, "w"),
01725                   sort_keys = True)
01726 
01727     def WriteAnnotation(self, key, value):
01728         """Writes the annotation to the annotation file.
01729         If the key is already present with a different value, the value becomes
01730         a list and the new value is appended to it, except for start_time and
01731         end_time.
01732         """
01733         # Initialize the annotation dict from the file (if present)
01734         if os.path.exists(self._annotationsFile):
01735             annotations = json.load(open(self._annotationsFile))
01736         else:
01737             annotations = {}
01738         # hack because we do not have proper JSON support
01739         key, value = map(str, [key, value])
01740         if key == "qmtest.run.start_time":
01741             # Special handling of the start time:
01742             # if we are updating a result, we have to keep the original start
01743             # time, but remove the original end time to mark the report to be
01744             # in progress.
01745             if key not in annotations:
01746                 annotations[key] = value
01747             if "qmtest.run.end_time" in annotations:
01748                 del annotations["qmtest.run.end_time"]
01749         else:
01750             # All other annotations are added to a list
01751             if key in annotations:
01752                 old = annotations[key]
01753                 if type(old) is list:
01754                     if value not in old:
01755                         annotations[key].append(value)
01756                 elif value != old:
01757                     annotations[key] = [old, value]
01758             else:
01759                 annotations[key] = value
01760         # Write the new annotations file
01761         json.dump(annotations, open(self._annotationsFile, "w"),
01762                   sort_keys = True)
01763 
01764     def WriteResult(self, result):
01765         """Prepare the test result directory in the destination directory storing
01766         into it the result fields.
01767         A summary of the test result is stored both in a file in the test directory
01768         and in the global summary file.
01769         """
01770         summary = {}
01771         summary["id"] = result.GetId()
01772         summary["outcome"] = result.GetOutcome()
01773         summary["cause"] = result.GetCause()
01774         summary["fields"] = result.keys()
01775         summary["fields"].sort()
01776 
01777         # Since we miss proper JSON support, I hack a bit
01778         for f in ["id", "outcome", "cause"]:
01779             summary[f] = str(summary[f])
01780         summary["fields"] = map(str, summary["fields"])
01781 
01782         self._summary.append(summary)
01783 
01784         # format:
01785         # testname/summary.json
01786         # testname/field1
01787         # testname/field2
01788         testOutDir = os.path.join(self.dir, summary["id"])
01789         if not os.path.isdir(testOutDir):
01790             os.makedirs(testOutDir)
01791         json.dump(summary, open(os.path.join(testOutDir, "summary.json"), "w"),
01792                   sort_keys = True)
01793         for f in summary["fields"]:
01794             open(os.path.join(testOutDir, f), "w").write(result[f])
01795 
01796         self._updateSummary()
01797 
01798     def Summarize(self):
01799         # Not implemented.
01800         pass
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Defines

Generated at Tue May 10 2011 18:53:47 for Gaudi Framework, version v22r2 by Doxygen version 1.7.2 written by Dimitri van Heesch, © 1997-2004