from subprocess import Popen, PIPE, STDOUT
def sanitize_for_xml(data):
    '''
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    '''
    bad_chars = re.compile(
        u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')

    def quote(match):
        return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
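
# Usage sketch (illustrative, not from the original module): quote control
# characters in captured output before embedding it in an XML report node.
# The ElementTree usage is an assumption made only for this example.
import xml.etree.ElementTree as ET

def quoted_stdout_node(text):
    """Return an XML element whose text is safe to serialize."""
    node = ET.Element('stdout')
    node.text = sanitize_for_xml(text)   # control characters get quoted
    return node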
    '''helper to debug GAUDI-1084, dump the list of processes'''
    from getpass import getuser
    if 'WORKSPACE' in os.environ:
        p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
        with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
            f.write(p.communicate()[0])
def kill_tree(ppid, sig):
    '''
    Send a signal to a process and all its child processes (starting from the
    leaves).
    '''
    log = logging.getLogger('kill_tree')
    ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
    children = map(int, get_children.communicate()[0].split())
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug('killing process %d', ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != 3:  # No such process
            raise
        log.debug('no such process %d', ppid)
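
# Usage sketch (illustrative, not from the original module): after a test times
# out, the whole process tree can be terminated. Assumes the (pid, signal)
# argument order used in the reconstruction above.
import signal

def stop_test_process(proc):
    """Terminate 'proc' (a subprocess.Popen object) and all of its children."""
    kill_tree(proc.pid, signal.SIGTERM)
    proc.wait()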
        logging.debug('running test %s', self.name)

        if self.options:
            if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
                         r'from\s+Configurables\s+import', self.options):
                optionFile = tempfile.NamedTemporaryFile(suffix='.py')
            else:
                optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
            optionFile.file.write(self.options)
                self.environment.items() + os.environ.items())

        platform_id = (os.environ.get('BINARY_TAG') or
                       os.environ.get('CMTCONFIG') or
                       platform.platform())
        # if at least one regex matches, the test is skipped
        skip_test = bool([None
                          for prex in self.unsupported_platforms
                          if re.search(prex, platform_id)])
                    workdir = tempfile.mkdtemp()

            elif "GAUDIEXE" in os.environ:
                prog = os.environ["GAUDIEXE"]

            dummy, prog_ext = os.path.splitext(prog)
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = map(RationalizePath, self.args)

            if prog_ext == ".py":
            validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
                                   'RESOURCE': None, 'TARGET': None,
                                   'TRACEBACK': None, 'START_TIME': None,
                                   'END_TIME': None, 'TIMEOUT_DETAIL': None})
            self.result = validatorRes
            def target():
                logging.debug('executing %r in %s',
                              params, workdir)
                self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
                                  env=self.environment)
                logging.debug('(pid: %d)', self.proc.pid)
                self.out, self.err = self.proc.communicate()

            thread = threading.Thread(target=target)
            thread.start()
            # wait for the process to finish or for the timeout to expire
            thread.join(self.timeout)

            if thread.is_alive():
                logging.debug('time out in test %s (pid %d)',
                              self.name, self.proc.pid)
                # try to get the stack trace of the stuck process
                cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
                       '--eval-command=thread apply all backtrace']
                gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                self.stack_trace = gdb.communicate()[0]

                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)
                self.causes.append('timeout')
            else:
                logging.debug('completed test %s', self.name)
                logging.debug('returnedCode = %s', self.proc.returncode)
                logging.debug('validating test...')

                shutil.rmtree(workdir, True)

            if self.signal is not None:

                    self.causes.append('exit code')

                    self.causes.append('exit code')

                self.causes.append("exit code")
        logging.debug('%s: %s', self.name, self.status)
        field_mapping = {'Exit Code': 'returnedCode',
                         'Environment': 'environment',
                         'Program Name': 'program',
                         'Validator': 'validator',
                         'Output Reference File': 'reference',
                         'Error Reference File': 'error_reference',
                         'Unsupported Platforms': 'unsupported_platforms',
                         'Stack Trace': 'stack_trace'}
        resultDict = [(key, getattr(self, attr))
                      for key, attr in field_mapping.iteritems()
                      if getattr(self, attr)]
        resultDict.append(('Working Directory',
                           RationalizePath(os.path.join(os.getcwd(),
                                                        self.workdir))))
        resultDict.extend(self.result.annotations.iteritems())
        return dict(resultDict)
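
# Sketch (illustrative, not from the original module) of how a field mapping
# like the one above turns populated test attributes into report entries;
# DummyTest is a stand-in object invented only for this example.
class DummyTest(object):
    returnedCode = 1
    environment = None           # empty attributes are skipped
    program = 'gaudirun.py'

demo_mapping = {'Exit Code': 'returnedCode',
                'Environment': 'environment',
                'Program Name': 'program'}
demo_result = dict((key, getattr(DummyTest, attr))
                   for key, attr in demo_mapping.items()
                   if getattr(DummyTest, attr))
# demo_result == {'Exit Code': 1, 'Program Name': 'gaudirun.py'}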
        elif stderr.strip() != self.stderr.strip():
            self.causes.append('standard error')
        return result, self.causes

    def findReferenceBlock(self, reference=None, stdout=None, result=None,
                           causes=None, signature_offset=0, signature=None,
                           id=None):
        """
        Given a block of text, tries to find it in the output. The block has to
        be identified by a signature line. By default the first line is used as
        the signature, or the line pointed to by signature_offset. If
        signature_offset points outside the block, a signature line can be
        passed as the signature argument. Note: if 'signature' is None (the
        default), a negative signature_offset is interpreted as an index in a
        list (e.g. -1 means the last line); otherwise it is interpreted as the
        number of lines before the first line of the block at which the
        signature must appear. The parameter 'id' allows one to distinguish
        between different calls to this function in the same validation code.
        """
        if reference is None:
            reference = self.reference
        reflines = filter(
            None, map(lambda s: s.rstrip(), reference.splitlines()))
        if not reflines:
            raise RuntimeError("Empty (or null) reference")
        outlines = filter(
            None, map(lambda s: s.rstrip(), stdout.splitlines()))

        res_field = "GaudiTest.RefBlock"
        if id:
            res_field += "_%s" % id
        if signature is None:
            if signature_offset < 0:
                signature_offset = len(reference) + signature_offset
            signature = reflines[signature_offset]
        # find the reference block in the output
        try:
            pos = outlines.index(signature)
            outlines = outlines[pos - signature_offset:pos +
                                len(reflines) - signature_offset]
            if reflines != outlines:
                msg = "standard output"
                # avoid adding the same cause twice if the function is called
                # more than once
                if msg not in causes:
                    causes.append(msg)
                result[res_field +
                       ".observed"] = result.Quote("\n".join(outlines))
        except ValueError:
            causes.append("missing signature")
        result[res_field + ".signature"] = result.Quote(signature)
        if len(reflines) > 1 or signature != reflines[0]:
            result[res_field + ".expected"] = result.Quote("\n".join(reflines))
    def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None,
                        result=None, causes=None):
        """
        Count the number of messages with the required severity (by default
        ERROR and FATAL) and check if their numbers match the expected ones
        (0 by default).
        The dictionary "expected" can be used to tune the number of errors and
        fatals allowed, or to limit the number of expected warnings, etc.
        """

        outlines = stdout.splitlines()
        from math import log10
        fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))

        linecount = 0
        for l in outlines:
            linecount += 1
            words = l.split()
            if len(words) >= 2 and words[1] in errors:
                errors[words[1]].append(fmt % (linecount, l.rstrip()))

        for e in errors:
            if len(errors[e]) != expected[e]:
                causes.append('%s(%d)' % (e, len(errors[e])))
                result["GaudiTest.lines.%s" %
                       e] = result.Quote('\n'.join(errors[e]))
                result["GaudiTest.lines.%s.expected#" %
                       e] = result.Quote(str(expected[e]))
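
# Sketch (illustrative, not from the original module) of the counting performed
# by countErrorLines: the second word of each output line is taken as the
# message severity and compared against the expected counts.
demo_lines = ['EventLoopMgr      INFO ---> Loop Finished',
              'MyAlgorithm      ERROR something went wrong',
              'MyService        FATAL cannot continue']
demo_errors = {'ERROR': [], 'FATAL': []}
for n, line in enumerate(demo_lines, 1):
    words = line.split()
    if len(words) >= 2 and words[1] in demo_errors:
        demo_errors[words[1]].append('%d - %s' % (n, line.rstrip()))
demo_causes = ['%s(%d)' % (sev, len(found))
               for sev, found in demo_errors.items() if len(found) != 0]
# demo_causes contains 'ERROR(1)' and 'FATAL(1)' when zero errors were expected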
    def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
                             trees_dict=None,
                             ignore=r"Basket|.*size|Compression"):
        """
        Compare the TTree summaries in stdout with the ones in trees_dict or
        in the reference file. By default the size, compression and basket
        fields are ignored.
        The presence of TTree summaries when none is expected is not a failure.
        """
        if trees_dict is None:
            lreference = self._expandReferenceFileName(self.reference)
            if lreference and os.path.isfile(lreference):
                trees_dict = findTTreeSummaries(open(lreference).read())

        from pprint import PrettyPrinter
        pp = PrettyPrinter()
        if trees_dict:
            result["GaudiTest.TTrees.expected"] = result.Quote(
                pp.pformat(trees_dict))
            if ignore:
                result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)

        trees = findTTreeSummaries(stdout)
        failed = cmpTreesDicts(trees_dict, trees, ignore)
        if failed:
            causes.append("trees summaries")
            msg = "%s: %s != %s" % getCmpFailingValues(
                trees_dict, trees, failed)
            result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
            result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))

        return causes
    def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
                             dict=None, ignore=None):
        """
        Compare the histogram summaries in stdout with the ones in dict or in
        the reference file.
        The presence of histogram summaries when none is expected is not a
        failure.
        """
        if dict is None:
            lreference = self._expandReferenceFileName(self.reference)
            if lreference and os.path.isfile(lreference):
                dict = findHistosSummaries(open(lreference).read())

        from pprint import PrettyPrinter
        pp = PrettyPrinter()
        if dict:
            result["GaudiTest.Histos.expected"] = result.Quote(
                pp.pformat(dict))
            if ignore:
                result["GaudiTest.Histos.ignore"] = result.Quote(ignore)

        histos = findHistosSummaries(stdout)
        failed = cmpTreesDicts(dict, histos, ignore)
        if failed:
            causes.append("histos summaries")
            msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
            result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
            result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
        return causes
    def validateWithReference(self, stdout=None, stderr=None, result=None,
                              causes=None, preproc=None):
        '''
        Default validation action: compare standard output and error to the
        reference files.
        '''
        if preproc is None:
            preproc = normalizeExamples

        # compare the standard output with its reference file, if any
        lreference = self._expandReferenceFileName(self.reference)
        if lreference and os.path.isfile(lreference):
            causes += ReferenceFileValidator(lreference, "standard output",
                                             "Output Diff",
                                             preproc=preproc)(stdout, result)

        if causes and lreference:
            # the comparison failed: write a new reference file for stdout
            newref = open(lreference + ".new", "w")
            for l in stdout.splitlines():
                newref.write(l.rstrip() + '\n')

        # compare the standard error with its reference file, if any
        lreference = self._expandReferenceFileName(self.error_reference)
        if lreference and os.path.isfile(lreference):
            newcauses = ReferenceFileValidator(lreference, "standard error",
                                               "Error Diff",
                                               preproc=preproc)(stderr, result)
            causes += newcauses
            if newcauses:
                # the comparison failed: write a new reference file for stderr
                newref = open(lreference + ".new", "w")
                for l in stderr.splitlines():
                    newref.write(l.rstrip() + '\n')
        else:
            causes += BasicOutputValidator(lreference, "standard error",
                                           "ExecTest.expected_stderr")(stderr, result)
    def _expandReferenceFileName(self, reffile):
        if not reffile:
            return ""

        def platformSplit(p): return set(p.split('-' in p and '-' or '_'))

        reference = os.path.normpath(os.path.join(self.basedir,
                                                  os.path.expandvars(reffile)))

        # old-style platform-specific reference name
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:
            # look for new-style platform-specific reference files
            dirname, basename = os.path.split(reference)
            if not dirname:
                dirname = '.'
            head = basename + "."
            head_len = len(head)
            platform = platformSplit(GetPlatform(self))
            if 'do0' in platform:
                platform.add('dbg')
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:
                # take the candidate matching the most platform tags
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
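
# Sketch (illustrative, not from the original module) of the platform-specific
# reference lookup above: among files named '<reference>.<tags>', pick the one
# whose tags are all contained in the current platform id.
demo_platform = set('x86_64-centos7-gcc8-opt'.split('-'))
demo_files = ['test.ref', 'test.ref.x86_64', 'test.ref.x86_64-gcc8',
              'test.ref.i686-winxp']
demo_candidates = []
for fname in demo_files:
    if fname.startswith('test.ref.'):
        tags = set(fname[len('test.ref.'):].split('-'))
        if demo_platform.issuperset(tags):
            demo_candidates.append((len(tags), fname))
demo_candidates.sort()
demo_best = demo_candidates[-1][1] if demo_candidates else 'test.ref'
# demo_best == 'test.ref.x86_64-gcc8' (the most specific matching reference)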
from GaudiKernel import ROOT6WorkAroundEnabled


def RationalizePath(p):
    '''
    Function used to normalize the used path.
    '''
    newPath = os.path.normpath(os.path.expandvars(p))
    if os.path.exists(newPath):
        p = os.path.realpath(newPath)
    return p
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it.  An application is looked for with or without the '.exe'
    suffix.  If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.exists(executable):
            if executable.endswith('.exe'):
                if os.path.exists(executable[:-4]):
                    return executable[:-4]
            else:
                head, executable = os.path.split(executable)
        else:
            return executable
    for d in os.environ.get("PATH").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.exists(fullpath):
            return fullpath
    if executable.endswith('.exe'):
        return which(executable[:-4])
    return None
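
# Usage sketch (illustrative, not from the original module): locate an
# executable on the current PATH, falling back to the bare name.
ls_path = which('ls')                      # e.g. '/bin/ls', or None if absent
gaudirun = which('gaudirun.py') or 'gaudirun.py'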
    UNTESTED = 'UNTESTED'

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert type(key) in types.StringTypes
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert type(key) in types.StringTypes
        assert type(value) in types.StringTypes
        self.annotations[key] = value
    def ValidateOutput(self, stdout, stderr, result):
        """Validate the output of the program.

        'stdout' -- A string containing the data written to the standard output
        stream.

        'stderr' -- A string containing the data written to the standard error
        stream.

        'result' -- A 'Result' object. It may be used to annotate
        the outcome according to the content of stderr.

        returns -- A list of strings giving causes of failure."""
        causes = []
        if not self.__CompareText(stdout, self.ref):
            causes.append(self.cause)
        return causes

    def __CompareText(self, s1, s2):
        """Compare 's1' and 's2', ignoring line endings.

        returns -- True if 's1' and 's2' are the same, ignoring
        differences in line endings."""
        if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
            to_ignore = re.compile(
                r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')

            def keep_line(l): return not to_ignore.match(l)
            return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
        else:
            return s1.splitlines() == s2.splitlines()
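
# Sketch (illustrative, not from the original module) of a line-ending
# insensitive comparison in the spirit of __CompareText: split both strings
# into lines and compare the resulting lists.
demo_a = 'first line\r\nsecond line\r\n'
demo_b = 'first line\nsecond line\n'
demo_equal = demo_a.splitlines() == demo_b.splitlines()   # True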
class FilePreprocessor:
    """ Base class for a callable that takes a file and returns a modified
    version of it."""

    def __call__(self, input):
        if hasattr(input, "__iter__"):
            lines = input
            mergeback = False
        else:
            lines = input.splitlines()
            mergeback = True
        output = self.__processFile__(lines)
        if mergeback:
            output = '\n'.join(output)
        return output
            if line.find(s) >= 0:

        if self.start in line:

        elif self.end in line:

            when = re.compile(when)

        if isinstance(rhs, RegexpReplacer):

            res._operations = self._operations + rhs._operations

            res = FilePreprocessor.__add__(self, rhs)

            if w is None or w.search(line):
                line = o.sub(r, line)
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01")
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None

            lst = line[(pos + self.siglen):].split()
            lst.sort()
            line = line[:(pos + self.siglen)] + " ".join(lst)
    '''
    Sort group of lines matching a regular expression
    '''

        self.exp = exp if hasattr(exp, 'match') else re.compile(exp)

        match = self.exp.match
normalizeExamples = maskPointers + normalizeDate
for w, o, r in [
    ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    ("^JobOptionsSvc.*options successfully read in from",
     r"read in from .*[/\\]([^/\\]*)$", r"file \1"),
    (None, r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
     "00000000-0000-0000-0000-000000000000"),
    ("ServiceLocatorHelper::", "ServiceLocatorHelper::(create|locate)Service",
     "ServiceLocatorHelper::service"),
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    (None, r'Service reference count check:',
     r'Looping over all active services...'),
    (None, r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
     r"\1NN"),
]:
    normalizeExamples += RegexpReplacer(o, r, w)
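
# Sketch (illustrative, not from the original module) of what the UUID rule
# above does once wrapped in a RegexpReplacer: run-specific identifiers are
# masked so that reference files stay stable between runs.
demo_uuid_re = re.compile(r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-"
                          r"[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}")
demo_line = "Root file GUID: 9A8B7C6D-1234-5678-9ABC-DEF012345678"
demo_masked = demo_uuid_re.sub("00000000-0000-0000-0000-000000000000",
                               demo_line)
# demo_masked == "Root file GUID: 00000000-0000-0000-0000-000000000000"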
lineSkipper = LineSkipper(["JobOptionsSvc        INFO # ",
                           "JobOptionsSvc     WARNING # ",
                           "This machine has a speed",
                           "ToolSvc.Sequenc...   INFO",
                           "DataListenerSvc      INFO XML written to file:",
                           "[INFO]", "[WARNING]",
                           "DEBUG No writable file catalog found which contains FID:",
                           "DEBUG Service base class initialized successfully",
                           "DEBUG Incident  timing:",
                           "INFO  'CnvServices':[",
                           "DEBUG  'CnvServices':[",
                           ], regexps=[
    r"^JobOptionsSvc        INFO *$",
    r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
    r"File '.*.xml' does not exist",
    r"INFO Refer to dataset .* by its file ID:",
    r"INFO Referring to dataset .* by its file ID:",
    r"INFO Disconnect from dataset",
    r"INFO Disconnected from dataset",
    r"INFO Disconnected data IO:",
    r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
    r"^StatusCodeSvc.*listing all unchecked return codes:",
    r"^StatusCodeSvc\s*INFO\s*$",
    r"Num\s*\|\s*Function\s*\|\s*Source Library",
    r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
    r"^ +[0-9]+ \|.*ROOT",
    r"^ +[0-9]+ \|.*\|.*Dict",
    r"StatusCodeSvc.*all StatusCode instances where checked",
    r"EventLoopMgr.*---> Loop Finished",
    r"SUCCESS\s*Booked \d+ Histogram\(s\)",
    r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
    r"Property(.*)'AuditRe(start|initialize)':",
    r"Property(.*)'IsIOBound':",
    r"Property(.*)'ErrorCount(er)?':",
    r"Property(.*)'Sequential':",
    r"Property(.*)'FilterCircularDependencies':",
    r"Property(.*)'IsClonable':",
    r"Property update for OutputLevel : new value =",
    r"EventLoopMgr\s*DEBUG Creating OutputStream",
])
if ROOT6WorkAroundEnabled('ReadRootmapCheck'):
    lineSkipper += LineSkipper(regexps=[
        r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
    ])

normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
                     normalizeEOL + LineSorter("Services to release : ") +
class ReferenceFileValidator:
    def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
        self.reffile = os.path.expandvars(reffile)
        self.cause = cause
        self.result_key = result_key
        self.preproc = preproc

    def __call__(self, stdout, result):
        causes = []
        if os.path.isfile(self.reffile):
            orig = open(self.reffile).xreadlines()
            if self.preproc:
                orig = self.preproc(orig)
                result[self.result_key + '.preproc.orig'] = \
                    result.Quote('\n'.join(map(str.strip, orig)))

        new = stdout.splitlines()
        if self.preproc:
            new = self.preproc(new)

        diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
        filterdiffs = map(lambda x: x.strip(), filter(
            lambda x: x[0] != " ", diffs))
        if filterdiffs:
            result[self.result_key] = result.Quote("\n".join(filterdiffs))
            result[self.result_key] += result.Quote("""
Legend:
        -) reference file
        +) standard output of the test""")
            result[self.result_key + '.preproc.new'] = \
                result.Quote('\n'.join(map(str.strip, new)))
            causes.append(self.cause)
        return causes
def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.
    """
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:
        # look for the start of the next TTree summary block
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same
    value.  If the value is a dict, the function is called recursively.
    to_check can contain more keys than reference, which will not be tested.
    The function returns at the first difference found.
    """
    fail_keys = []
    # filter the keys in the reference dictionary
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    # loop over the (not ignored) keys of the reference dictionary
    for k in keys:
        if k in to_check:
            if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
                # recurse when both values are dictionaries
                failed = fail_keys = cmpTreesDicts(
                    reference[k], to_check[k], ignore)
            else:
                failed = to_check[k] != reference[k]
        else:  # a missing key is a failure
            failed = True
        if failed:
            fail_keys.insert(0, k)
            break  # stop at the first difference
    return fail_keys  # empty list if everything matched


def getCmpFailingValues(reference, to_check, fail_path):
    c = to_check
    r = reference
    for k in fail_path:
        c = c.get(k, None)
        r = r.get(k, None)
        if c is None or r is None:
            break
    return (fail_path, r, c)
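
# Sketch (illustrative, not from the original module) of the recursive
# comparison performed by cmpTreesDicts: every key of the reference must be
# present in the checked dict with the same value, recursing into nested dicts.
def demo_cmp(reference, to_check):
    for key, value in reference.items():
        if key not in to_check:
            return [key]
        if isinstance(value, dict) and isinstance(to_check[key], dict):
            path = demo_cmp(value, to_check[key])
            if path:
                return [key] + path
        elif to_check[key] != value:
            return [key]
    return []                       # empty path means "no difference found"

demo_fail_path = demo_cmp({'tree': {'Entries': 10}}, {'tree': {'Entries': 9}})
# demo_fail_path == ['tree', 'Entries']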
h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")


def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary of the digested information and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # the first line is a sequence of '*'
    count = len(lines)

    def splitcols(l): return [f.strip() for f in l.strip("*\n").split(':', 2)]
    def parseblock(ll):
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
        r["Compression"] = float(sizes[-1])
        return r
    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i:i + 3])
        result["Branches"] = {}
        i += 3
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip the branch header block
                i += 3
                continue
            branch = parseblock(lines[i:i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 3

    return (result, i)
def parseHistosSummary(lines, pos):
    """
    Extract the histogram info from the lines starting at pos.
    Returns the position of the first line after the summary block.
    """
    h_table_head = re.compile(
        r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode the header of the summary block
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups()  # histogram type and directory
            t = t.replace(" profile", "Prof")
            cont = {}
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            pos += 1
            l = lines[pos]
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # one-line summaries
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [x.strip()
                              for x in h_short_summ.search(lines[pos]).groups()]
                    cont[values[0]] = values
                    pos += 1
            else:  # line cannot be interpreted
                raise RuntimeError(
                    "Cannot understand line %d: '%s'" % (pos, l))
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # if the full table is not present, use only the header
        summ[name] = {"header": header}
    return summ, pos
def findHistosSummaries(stdout):
    """
    Scan stdout to find histogram summaries and digest them.
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}

    pos = 0
    while pos < nlines:
        summ = {}
        # find the first line of the next summary block
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
def PlatformIsNotSupported(self, context, result):
    platform = GetPlatform(self)
    unsupported = [re.compile(x) for x in [str(y).strip()
                                           for y in unsupported_platforms]
                   if x]
    for p_re in unsupported:
        if p_re.search(platform):
            result.SetOutcome(result.UNTESTED)
            result[result.CAUSE] = 'Platform not supported.'
            return True
    return False


def GetPlatform(self):
    """
    Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
    """
    arch = "None"
    # check the architecture name
    if "BINARY_TAG" in os.environ:
        arch = os.environ["BINARY_TAG"]
    elif "CMTCONFIG" in os.environ:
        arch = os.environ["CMTCONFIG"]
    elif "SCRAM_ARCH" in os.environ:
        arch = os.environ["SCRAM_ARCH"]
    return arch


def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    platform = GetPlatform(self)
    return "winxp" in platform or platform.startswith("win")
 
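# Usage sketch (illustrative, not from the original module): the platform id is
# taken from the first of BINARY_TAG, CMTCONFIG or SCRAM_ARCH that is set; the
# 'self' argument is unused in the reconstruction above.
current_platform = GetPlatform(None)
if isWinPlatform(None):
    print('running on Windows: %s' % current_platform)
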
def PlatformIsNotSupported(self, context, result)
def __processLine__(self, line)
def __init__(self, start, end)
def __call__(self, input)
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
def __processLine__(self, line)
def cmpTreesDicts(reference, to_check, ignore=None)
def __processFile__(self, lines)
def ValidateOutput(self, stdout, stderr, result)
def read(f, regex='.*', skipevents=0)
def __processLine__(self, line)
def __processFile__(self, lines)
def __call__(self, out, result)
def findHistosSummaries(stdout)
def _parseTTreeSummary(lines, pos)
def __call__(self, stdout, result)
def __processLine__(self, line)
def __init__(self, orig, repl="", when=None)
def __init__(self, signature)
def __call__(self, input)
def sanitize_for_xml(data)
def getCmpFailingValues(reference, to_check, fail_path)
def __init__(self, members=[])
def __init__(self, strings=[], regexps=[])
def __setitem__(self, key, value)
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
def __processLine__(self, line)
def parseHistosSummary(lines, pos)
def _expandReferenceFileName(self, reffile)
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
def __CompareText(self, s1, s2)
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
def __getitem__(self, key)
def findTTreeSummaries(stdout)
def __init__(self, ref, cause, result_key)
def ROOT6WorkAroundEnabled(id=None)
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")