from subprocess import Popen, PIPE, STDOUT
def sanitize_for_xml(data):
    '''
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    '''
    bad_chars = re.compile(
        u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')

    def quote(match):
        'quote every offending character in the match'
        return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
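# Illustrative sketch (not part of the original module): sanitize_for_xml is
# typically applied to captured stdout/stderr before embedding it in an XML
# report, so stray control characters cannot break the XML parser. The helper
# name and the tag below are made up for the example.
def _example_sanitize_usage(raw_output):
    # control characters are replaced by visible [NON-XML-CHAR-0x..] markers
    return '<output>%s</output>' % sanitize_for_xml(raw_output)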
def dumpProcs(name):
    '''helper to debug GAUDI-1084, dump the list of processes'''
    from getpass import getuser
    if 'WORKSPACE' in os.environ:
        p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
        with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
            f.write(p.communicate()[0])
def kill_tree(ppid, sig):
    '''
    Send a signal to a process and all its child processes (starting from the
    leaves).
    '''
    log = logging.getLogger('kill_tree')
    ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
    children = map(int, get_children.communicate()[0].split())
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug('killing process %d', ppid)
        os.kill(ppid, sig)
    except OSError:
        # the process may already be gone
        log.debug('no such process %d', ppid)
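# Usage sketch (illustrative): kill_tree is the kind of helper a timeout
# handler needs, because killing only the parent process would leave its
# children running. The wrapper function below is hypothetical.
import signal


def _example_stop_process_tree(pid):
    # ask the whole tree to terminate, then force-kill anything that survives
    kill_tree(pid, signal.SIGTERM)
    kill_tree(pid, signal.SIGKILL)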
def run(self):
    logging.debug('running test %s', self.name)

    if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
                 r'from\s+Configurables\s+import', self.options):
        optionFile = tempfile.NamedTemporaryFile(suffix='.py')
    else:
        optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
    optionFile.file.write(self.options)
    if self.environment is None:
        self.environment = os.environ
    else:
        self.environment = dict(self.environment.items() +
                                os.environ.items())

    platform_id = (os.environ.get('BINARY_TAG') or
                   os.environ.get('CMTCONFIG') or
                   platform.platform())
    # skip the test if the platform id matches one of the unsupported ones
    skip_test = bool([None
                      for prex in self.unsupported_platforms
                      if re.search(prex, platform_id)])
    if not skip_test:
        workdir = self.workdir
        if self.use_temp_dir:
            if self._common_tmpdir:
                workdir = self._common_tmpdir
            else:
                workdir = tempfile.mkdtemp()
        if self.program != '':
            prog = self.program
        elif "GAUDIEXE" in os.environ:
            prog = os.environ["GAUDIEXE"]
        else:
            prog = "Gaudi.exe"

        dummy, prog_ext = os.path.splitext(prog)
        if prog_ext not in [".exe", ".py", ".bat"]:
            prog += ".exe"
            prog_ext = ".exe"

        prog = which(prog) or prog
        args = map(RationalizePath, self.args)
        if prog_ext == ".py":
            # run scripts through the Python interpreter
            params = ['python', RationalizePath(prog)] + args
        else:
            params = [RationalizePath(prog)] + args

        validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
                               'RESOURCE': None, 'TARGET': None,
                               'TRACEBACK': None, 'START_TIME': None,
                               'END_TIME': None, 'TIMEOUT_DETAIL': None})
        self.result = validatorRes
        def target():
            logging.debug('executing %r in %s', params, workdir)
            self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
                              env=self.environment)
            logging.debug('(pid: %d)', self.proc.pid)
            self.out, self.err = self.proc.communicate()

        thread = threading.Thread(target=target)
        thread.start()
        # wait until the timeout expires or the process finishes
        thread.join(self.timeout)

        if thread.is_alive():
            logging.debug('time out in test %s (pid %d)',
                          self.name, self.proc.pid)
            # try to get the stack trace of the stuck process with gdb
            cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
                   '--eval-command=thread apply all backtrace']
            gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
            self.stack_trace = gdb.communicate()[0]
        if thread.is_alive():
            self.causes.append('timeout')
        else:
            logging.debug('completed test %s', self.name)

            # retrieve the exit code of the process
            logging.debug('returnedCode = %s', self.proc.returncode)
            self.returnedCode = self.proc.returncode

            logging.debug('validating test...')
            self.result, self.causes = self.ValidateOutput(
                stdout=self.out, stderr=self.err, result=validatorRes)

        # remove the temporary directory if we created it
        if self.use_temp_dir and not self._common_tmpdir:
            shutil.rmtree(workdir, True)

        os.chdir(self.basedir)
        # check the exit code of the program
        if self.signal is not None:
            # the test is expected to be terminated by a specific signal
            if int(self.returnedCode) != -int(self.signal):
                self.causes.append('exit code')
        elif self.exit_code is not None:
            # the test is expected to exit with a specific code
            if int(self.returnedCode) != int(self.exit_code):
                self.causes.append('exit code')
        elif self.returnedCode != 0:
            self.causes.append("exit code")

        if self.causes:
            self.status = "failed"
        else:
            self.status = "passed"

    else:
        self.status = "skipped"

    logging.debug('%s: %s', self.name, self.status)
    field_mapping = {'Exit Code': 'returnedCode',
                     'Environment': 'environment',
                     'Program Name': 'program',
                     'Validator': 'validator',
                     'Output Reference File': 'reference',
                     'Error Reference File': 'error_reference',
                     'Unsupported Platforms': 'unsupported_platforms',
                     'Stack Trace': 'stack_trace'}
    resultDict = [(key, getattr(self, attr))
                  for key, attr in field_mapping.iteritems()
                  if getattr(self, attr)]
    resultDict.append(('Working Directory',
                       RationalizePath(os.path.join(os.getcwd(),
                                                    self.workdir))))
    resultDict.extend(self.result.annotations.iteritems())
    return dict(resultDict)
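# Usage sketch (illustrative; the concrete attribute values are hypothetical):
# a driver configures a BaseTest instance, runs it and gets back the
# dictionary built from field_mapping plus the validator annotations.
def _example_run_test():
    test = BaseTest()
    test.program = 'gaudirun.py'      # executable under test (example value)
    test.args = ['options.py']        # its arguments (example value)
    test.reference = 'refs/job.ref'   # stdout reference file (example value)
    summary = test.run()
    return test.status, summary.get('Exit Code')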
def ValidateOutput(self, stdout, stderr, result):
    causes = self.causes
    # a validator overridden by the specific test takes precedence over the
    # default stdout/stderr comparison
    if self.validator is not BaseTest.validator:
        self.validator(stdout, stderr)
    elif stderr.strip() != self.stderr.strip():
        self.causes.append('standard error')
    return result, causes
def findReferenceBlock(self, reference=None, stdout=None, result=None,
                       causes=None, signature_offset=0, signature=None,
                       id=None):
    """
    Given a block of text, tries to find it in the output. The block has to
    be identified by a signature line. By default, the first line is used as
    the signature, or the line pointed to by signature_offset. If
    signature_offset points outside the block, a signature line can be passed
    as the signature argument. Note: if 'signature' is None (the default), a
    negative signature_offset is interpreted as an index in a list (e.g. -1
    means the last line); otherwise it is interpreted as the number of lines
    before the first one of the block at which the signature must appear.
    The parameter 'id' allows one to distinguish between different calls to
    this function in the same validation code.
    """
    if reference is None:
        reference = self.reference
    if stdout is None:
        stdout = self.out
    if result is None:
        result = self.result
    if causes is None:
        causes = self.causes

    reflines = filter(None, map(lambda s: s.rstrip(), reference.splitlines()))
    if not reflines:
        raise RuntimeError("Empty (or null) reference")
    # the same on standard output
    outlines = filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))

    res_field = "GaudiTest.RefBlock"
    if id:
        res_field += "_%s" % id

    if signature is None:
        if signature_offset < 0:
            signature_offset = len(reference) + signature_offset
        signature = reflines[signature_offset]
    # find the reference block in the output
    try:
        pos = outlines.index(signature)
        outlines = outlines[pos - signature_offset:
                            pos + len(reflines) - signature_offset]
        if reflines != outlines:
            msg = "standard output"
            # avoid duplicate messages if the function is called twice
            if msg not in causes:
                causes.append(msg)
            result[res_field + ".observed"] = result.Quote("\n".join(outlines))
    except ValueError:
        causes.append("missing signature")
    result[res_field + ".signature"] = result.Quote(signature)
    if len(reflines) > 1 or signature != reflines[0]:
        result[res_field + ".expected"] = result.Quote("\n".join(reflines))

    return causes
def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0},
                    stdout=None, result=None, causes=None):
    """
    Count the number of messages with the required severity (by default ERROR
    and FATAL) and check if their numbers match the expected ones (0 by
    default). The dictionary "expected" can be used to tune the number of
    errors and fatals allowed, or to limit the number of expected warnings
    etc.
    """
    if stdout is None:
        stdout = self.out
    if result is None:
        result = self.result
    if causes is None:
        causes = self.causes

    # collect the offending lines per severity
    errors = {}
    for sev in expected:
        errors[sev] = []

    outlines = stdout.splitlines()
    from math import log10
    fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))

    linecount = 0
    for l in outlines:
        linecount += 1
        words = l.split()
        if len(words) >= 2 and words[1] in errors:
            errors[words[1]].append(fmt % (linecount, l.rstrip()))

    for e in errors:
        if len(errors[e]) != expected[e]:
            causes.append('%s(%d)' % (e, len(errors[e])))
            result["GaudiTest.lines.%s" % e] = \
                result.Quote('\n'.join(errors[e]))
            result["GaudiTest.lines.%s.expected#" % e] = \
                result.Quote(str(expected[e]))

    return causes
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
                         trees_dict=None,
                         ignore=r"Basket|.*size|Compression"):
    """
    Compare the TTree summaries in stdout with the ones in trees_dict or in
    the reference file. By default ignore the size, compression and basket
    fields.
    The presence of TTree summaries when none is expected is not a failure.
    """
    if stdout is None:
        stdout = self.out
    if result is None:
        result = self.result
    if causes is None:
        causes = self.causes
    if trees_dict is None:
        lreference = self._expandReferenceFileName(self.reference)
        # take the expected TTree summaries from the reference file, if any
        if lreference and os.path.isfile(lreference):
            trees_dict = findTTreeSummaries(open(lreference).read())
        else:
            trees_dict = {}

    from pprint import PrettyPrinter
    pp = PrettyPrinter()
    if trees_dict:
        result["GaudiTest.TTrees.expected"] = \
            result.Quote(pp.pformat(trees_dict))
        if ignore:
            result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)

    trees = findTTreeSummaries(stdout)
    failed = cmpTreesDicts(trees_dict, trees, ignore)
    if failed:
        causes.append("trees summaries")
        msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
        result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
        result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))

    return causes
def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
                         dict=None, ignore=None):
    """
    Compare the histogram summaries in stdout with the ones in dict or in
    the reference file (the 'ignore' pattern works as in
    CheckTTreesSummaries).
    The presence of histogram summaries when none is expected is not a
    failure.
    """
    if stdout is None:
        stdout = self.out
    if result is None:
        result = self.result
    if causes is None:
        causes = self.causes
    if dict is None:
        lreference = self._expandReferenceFileName(self.reference)
        # take the expected summaries from the reference file, if any
        if lreference and os.path.isfile(lreference):
            dict = findHistosSummaries(open(lreference).read())
        else:
            dict = {}

    from pprint import PrettyPrinter
    pp = PrettyPrinter()
    if dict:
        result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
        if ignore:
            result["GaudiTest.Histos.ignore"] = result.Quote(ignore)

    histos = findHistosSummaries(stdout)
    failed = cmpTreesDicts(dict, histos, ignore)
    if failed:
        causes.append("histos summaries")
        msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
        result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
        result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))

    return causes
def validateWithReference(self, stdout=None, stderr=None, result=None,
                          causes=None, preproc=None):
    """
    Default validation action: compare standard output and error to the
    reference files.
    """
    if stdout is None:
        stdout = self.out
    if stderr is None:
        stderr = self.err
    if result is None:
        result = self.result
    if causes is None:
        causes = self.causes
    # set the default preprocessor
    if preproc is None:
        preproc = normalizeExamples
    # check the standard output
    lreference = self._expandReferenceFileName(self.reference)
    # call the validator if the reference file exists
    if lreference and os.path.isfile(lreference):
        causes += ReferenceFileValidator(lreference,
                                         "standard output",
                                         "ExecTest.expected_stdout",
                                         preproc=preproc)(stdout, result)
    if causes and lreference:
        # write an updated reference file next to the old one
        newref = open(lreference + ".new", "w")
        # sanitize newlines
        for l in stdout.splitlines():
            newref.write(l.rstrip() + '\n')
        del newref  # flush and close

    # check the standard error
    lreference = self._expandReferenceFileName(self.error_reference)
    if lreference and os.path.isfile(lreference):
        newcauses = ReferenceFileValidator(lreference,
                                           "standard error",
                                           "ExecTest.expected_stderr",
                                           preproc=preproc)(stderr, result)
        causes += newcauses
        if newcauses:
            # write an updated reference file for stderr as well
            newref = open(lreference + ".new", "w")
            for l in stderr.splitlines():
                newref.write(l.rstrip() + '\n')
            del newref  # flush and close
    else:
        causes += BasicOutputValidator(lreference,
                                       "standard error",
                                       "ExecTest.expected_stderr")(stderr, result)

    return causes
def _expandReferenceFileName(self, reffile):
    # function to split a platform string into its tags
    platformSplit = lambda p: set(p.split('-' in p and '-' or '_'))

    reference = os.path.normpath(os.path.join(self.basedir,
                                              os.path.expandvars(reffile)))

    # old-style platform-specific reference name
    spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
    if os.path.isfile(spec_ref):
        reference = spec_ref
    else:
        # look for new-style platform-specific reference files, i.e. files
        # whose name starts with the reference filename
        dirname, basename = os.path.split(reference)
        if not dirname:
            dirname = '.'
        head = basename + "."
        head_len = len(head)
        platform = platformSplit(GetPlatform(self))
        if 'do0' in platform:
            platform.add('dbg')
        candidates = []
        for f in os.listdir(dirname):
            if f.startswith(head):
                req_plat = platformSplit(f[head_len:])
                if platform.issuperset(req_plat):
                    candidates.append((len(req_plat), f))
        if candidates:
            # take the candidate matching the largest number of platform tags
            candidates.sort()
            reference = os.path.join(dirname, candidates[-1][1])
    return reference
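# Illustrative sketch (file names and BINARY_TAG value are made up):
# _expandReferenceFileName prefers a platform-specific variant of the
# reference file. With BINARY_TAG=x86_64-slc6-gcc48-opt, both
# 'job.ref.x86_64-slc6-gcc48-opt' and 'job.ref.slc6' are acceptable
# candidates, and the one matching the largest number of platform tags wins.
def _example_expand_reference(test):
    return test._expandReferenceFileName('refs/job.ref')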
from GaudiKernel import ROOT6WorkAroundEnabled
def RationalizePath(p):
    """
    Function used to normalize the used path.
    """
    newPath = os.path.normpath(os.path.expandvars(p))
    if os.path.exists(newPath):
        p = os.path.realpath(newPath)
    return p
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the
    full path to it.  An application is looked for with or without the '.exe'
    suffix.  If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.exists(executable):
            if executable.endswith('.exe'):
                if os.path.exists(executable[:-4]):
                    return executable[:-4]
            head, executable = os.path.split(executable)
        else:
            return executable
    for d in os.environ.get("PATH").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.exists(fullpath):
            return fullpath
    if executable.endswith('.exe'):
        return which(executable[:-4])
    return None
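# Illustrative sketch: which() mirrors the shell 'which' (with an extra retry
# without the '.exe' suffix) and is what run() uses to resolve the program
# name; falling back to the bare name is up to the caller.
def _example_resolve_program(name):
    return which(name) or name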
def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
    self.annotations = annotations.copy()

def __getitem__(self, key):
    assert type(key) in types.StringTypes
    return self.annotations[key]

def __setitem__(self, key, value):
    assert type(key) in types.StringTypes
    assert type(value) in types.StringTypes
    self.annotations[key] = value
623 """Validate the output of the program. 624 'stdout' -- A string containing the data written to the standard output 626 'stderr' -- A string containing the data written to the standard error 628 'result' -- A 'Result' object. It may be used to annotate 629 the outcome according to the content of stderr. 630 returns -- A list of strings giving causes of failure.""" 635 causes.append(self.
cause)
643 """Compare 's1' and 's2', ignoring line endings. 646 returns -- True if 's1' and 's2' are the same, ignoring 647 differences in line endings.""" 650 to_ignore = re.compile(
r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
651 keep_line =
lambda l:
not to_ignore.match(l)
652 return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
654 return s1.splitlines() == s2.splitlines()
660 """ Base class for a callable that takes a file and returns a modified 668 if l: output.append(l)
671 if hasattr(input,
"__iter__"):
675 lines = input.splitlines()
678 if mergeback: output =
'\n'.join(output)
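# Illustrative sketch (hypothetical subclass): a new preprocessor only needs
# to override __processLine__; returning None drops the line, returning a
# string keeps the (possibly modified) line. Applied to a string, the
# callable splits it into lines and joins the result back.
class _ExampleTimestampStripper(FilePreprocessor):
    _timestamp = re.compile(r'^\d{2}:\d{2}:\d{2}\s+')

    def __processLine__(self, line):
        return self._timestamp.sub('', line)

# _ExampleTimestampStripper()('12:34:56 ApplicationMgr INFO ready')
# would give 'ApplicationMgr INFO ready'.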
def __processLine__(self, line):
    # LineSkipper: drop the line if it contains one of the configured strings
    # or matches one of the regular expressions
    for s in self.strings:
        if line.find(s) >= 0:
            return None
    for r in self.regexps:
        if r.search(line):
            return None
    return line

def __processLine__(self, line):
    # BlockSkipper: drop everything between the start and end markers
    # (self._skipping is the flag initialized in __init__)
    if self.start in line:
        self._skipping = True
        return None
    elif self.end in line:
        self._skipping = False
    elif self._skipping:
        return None
    return line
class RegexpReplacer(FilePreprocessor):
    def __init__(self, orig, repl="", when=None):
        if when:
            when = re.compile(when)
        self._operations = [(when, re.compile(orig), repl)]

    def __add__(self, rhs):
        if isinstance(rhs, RegexpReplacer):
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res

    def __processLine__(self, line):
        for w, o, r in self._operations:
            if w is None or w.search(line):
                line = o.sub(r, line)
        return line
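# Illustrative sketch: a RegexpReplacer substitutes a pattern, optionally only
# on lines matching the 'when' pattern, and replacers can be chained with '+'.
# The patterns below are made up.
_exampleLineNoMasker = RegexpReplacer(r':\d+', ':NNN', when='Traceback')
# _exampleLineNoMasker('Traceback line: script.py:123')
# gives 'Traceback line: script.py:NNN'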
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01")
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'

skipEmptyLines = FilePreprocessor()
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None


class LineSorter(FilePreprocessor):
    def __init__(self, signature):
        self.signature = signature
        self.siglen = len(signature)

    def __processLine__(self, line):
        pos = line.find(self.signature)
        if pos >= 0:
            # sort the whitespace-separated items following the signature
            lst = line[(pos + self.siglen):].split()
            lst.sort()
            line = line[:(pos + self.siglen)]
            line += " ".join(lst)
        return line


class SortGroupOfLines(FilePreprocessor):
    '''
    Sort groups of lines matching a regular expression.
    '''

    def __init__(self, exp):
        self.exp = exp if hasattr(exp, 'match') else re.compile(exp)

    def __processFile__(self, lines):
        match = self.exp.match
# Preprocessors for the examples
normalizeExamples = maskPointers + normalizeDate
for w, o, r in [
    # normalize timing information
    ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("0x########", r"\[.*/([^/]*.*)\]", r"[\1]"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    ("^JobOptionsSvc.*options successfully read in from",
     r"read in from .*[/\\]([^/\\]*)$", r"file \1"),
    # normalize UUIDs, except those ending with all 0s (i.e. class IDs)
    (None,
     r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
     "00000000-0000-0000-0000-000000000000"),
    ("ServiceLocatorHelper::",
     "ServiceLocatorHelper::(create|locate)Service",
     "ServiceLocatorHelper::service"),
    # remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    (None, r'Service reference count check:',
     r'Looping over all active services...'),
    (None, r"Property(.*)'ErrorCount':", r"Property\1'ErrorCounter':"),
    # ignore the count of declared properties
    (None,
     r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
     r"\1NN"),
]:
    normalizeExamples += RegexpReplacer(o, r, w)
813 "JobOptionsSvc INFO # ",
814 "JobOptionsSvc WARNING # ",
817 "This machine has a speed",
820 "ToolSvc.Sequenc... INFO",
821 "DataListenerSvc INFO XML written to file:",
822 "[INFO]",
"[WARNING]",
823 "DEBUG No writable file catalog found which contains FID:",
825 "DEBUG Service base class initialized successfully",
826 "DEBUG Incident timing:",
827 "INFO 'CnvServices':[",
831 r"^JobOptionsSvc INFO *$",
833 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
834 r"0x[0-9a-fA-F#]+ *Algorithm::sysInitialize\(\) *\[",
835 r"0x[0-9a-fA-F#]* *__gxx_personality_v0 *\[",
836 r"File '.*.xml' does not exist",
837 r"INFO Refer to dataset .* by its file ID:",
838 r"INFO Referring to dataset .* by its file ID:",
839 r"INFO Disconnect from dataset",
840 r"INFO Disconnected from dataset",
841 r"INFO Disconnected data IO:",
842 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
844 r"^StatusCodeSvc.*listing all unchecked return codes:",
845 r"^StatusCodeSvc\s*INFO\s*$",
846 r"Num\s*\|\s*Function\s*\|\s*Source Library",
849 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
851 r"^ +[0-9]+ \|.*ROOT",
852 r"^ +[0-9]+ \|.*\|.*Dict",
854 r"StatusCodeSvc.*all StatusCode instances where checked",
856 r"EventLoopMgr.*---> Loop Finished",
860 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
864 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
865 r"Property(.*)'AuditRe(start|initialize)':",
866 r"Property(.*)'IsIOBound':",
868 r"Property update for OutputLevel : new value =",
869 r"EventLoopMgr\s*DEBUG Creating OutputStream",
875 r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
                     normalizeEOL + LineSorter("Services to release : "))
class ReferenceFileValidator:
    def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
        self.reffile = os.path.expandvars(reffile)
        self.cause = cause
        self.result_key = result_key
        self.preproc = preproc

    def __call__(self, stdout, result):
        causes = []
        if os.path.isfile(self.reffile):
            orig = open(self.reffile).xreadlines()
            if self.preproc:
                orig = self.preproc(orig)
        else:
            orig = []
        new = stdout.splitlines()
        if self.preproc:
            new = self.preproc(new)

        diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
        filterdiffs = map(lambda x: x.strip(),
                          filter(lambda x: x[0] != " ", diffs))
        if filterdiffs:
            result[self.result_key] = result.Quote("\n".join(filterdiffs))
            result[self.result_key] += result.Quote("""
Legend:
        -) reference file
        +) standard output of the test""")
            causes.append(self.cause)
        return causes
def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.
    """
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:
        # search for the next TTree summary table (delimited by '*' lines)
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are also in to_check, with the same
    value. If the value is a dict, the function is called recursively.
    to_check can contain more keys than reference; they will not be tested.
    The function returns at the first difference found.
    """
    fail_keys = []
    # filter the keys in the reference dictionary
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    # loop over the keys (not ignored) in the reference dictionary
    for k in keys:
        if k in to_check:  # the key must be in the dictionary to check
            if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
                # recurse into nested dictionaries
                failed = fail_keys = cmpTreesDicts(reference[k], to_check[k],
                                                   ignore)
            else:
                failed = to_check[k] != reference[k]
        else:
            failed = True
        if failed:
            fail_keys.insert(0, k)
            break  # exit from the loop at the first failure
    # return the list of keys leading to the difference (empty if none)
    return fail_keys


def getCmpFailingValues(reference, to_check, fail_path):
    c = to_check
    r = reference
    for k in fail_path:
        c = c.get(k, None)
        r = r.get(k, None)
        if c is None or r is None:
            break  # one of the two dictionaries is not deep enough
    return (fail_path, r, c)
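# Illustrative sketch with made-up dictionaries: cmpTreesDicts returns the
# key path leading to the first difference (an empty list means no
# difference), and getCmpFailingValues resolves that path to the two
# conflicting values.
def _example_compare_summaries():
    reference = {'MyTree': {'Entries': 100, 'Total size': 4096}}
    found = {'MyTree': {'Entries': 99, 'Total size': 8192}}
    failed = cmpTreesDicts(reference, found,
                           ignore=r"Basket|.*size|Compression")
    if failed:  # e.g. ['MyTree', 'Entries']
        return getCmpFailingValues(reference, found, failed)
    return None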
# signature of the printout of the histogram count per directory
h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary of the digested information and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # the first line is a sequence of '*'
    count = len(lines)

    splitcols = lambda l: [f.strip() for f in l.strip("*\n").split(':', 2)]

    def parseblock(ll):
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
        r["Compression"] = float(sizes[-1])
        return r

    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i:i + 3])
        result["Branches"] = {}
        i += 4
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header lines
                i += 3
                continue
            branch = parseblock(lines[i:i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 4

    return (result, i)
def parseHistosSummary(lines, pos):
    """
    Extract the histogram info from the lines starting at pos.
    Returns the position of the first line after the summary block.
    """
    global h_count_re
    h_table_head = re.compile(
        r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode the header line
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # histogram type and directory
            t = t.replace(" profile", "Prof")
            cont = {}
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
        else:
            break
        pos += 1
        l = lines[pos]
        if l.startswith(" | ID"):
            # table format
            titles = [x.strip() for x in l.split("|")][1:]
            pos += 1
            while pos < nlines and lines[pos].startswith(" |"):
                l = lines[pos]
                values = [x.strip() for x in l.split("|")][1:]
                hcont = {}
                for i in range(len(titles)):
                    hcont[titles[i]] = values[i]
                cont[hcont["ID"]] = hcont
                pos += 1
        elif l.startswith(" ID="):
            while pos < nlines and lines[pos].startswith(" ID="):
                values = [x.strip()
                          for x in h_short_summ.search(lines[pos]).groups()]
                cont[values[0]] = values
                pos += 1
        else:  # not a recognized format
            raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))

    if summ:
        for d in summ:
            summ[d]["header"] = header
    else:
        # if the detailed table is not present, keep only the header
        summ[name] = {"header": header}
    return summ, pos
def findHistosSummaries(stdout):
    """
    Scan stdout to find ROOT histogram summaries and digest them.
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}
    global h_count_re

    pos = 0
    while pos < nlines:
        summ = {}
        # find the next histogram summary header
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
def PlatformIsNotSupported(self, context, result):
    platform = GetPlatform(self)
    unsupported = [re.compile(x)
                   for x in [str(y).strip()
                             for y in self.unsupported_platforms]
                   if x]
    for p_re in unsupported:
        if p_re.search(platform):
            result.SetOutcome(result.UNTESTED)
            result[result.CAUSE] = 'Platform not supported.'
            return True
    return False


def GetPlatform(self):
    """
    Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
    """
    arch = "None"
    # check the architecture name
    if "BINARY_TAG" in os.environ:
        arch = os.environ["BINARY_TAG"]
    elif "CMTCONFIG" in os.environ:
        arch = os.environ["CMTCONFIG"]
    elif "SCRAM_ARCH" in os.environ:
        arch = os.environ["SCRAM_ARCH"]
    return arch
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    platform = GetPlatform(self)
    return "winxp" in platform or platform.startswith("win")