21 from datetime
import datetime, timedelta
22 from html
import escape
as escape_for_html
23 from subprocess
import PIPE, STDOUT, Popen
24 from tempfile
import NamedTemporaryFile, mkdtemp
25 from unittest
import TestCase
27 if sys.version_info < (3, 5):
30 from codecs
import backslashreplace_errors, register_error
33 if isinstance(exc, UnicodeDecodeError):
34 code =
hex(ord(exc.object[exc.start]))
35 return (
"\\" + code[1:], exc.start + 1)
37 return backslashreplace_errors(exc)
39 register_error(
"backslashreplace", _new_backslashreplace_errors)
41 del backslashreplace_errors
42 del _new_backslashreplace_errors
47 OUTPUT_LIMIT = int(os.environ.get(
"GAUDI_TEST_STDOUT_LIMIT", 100 * 1024**2))
52 Take a string with invalid ASCII/UTF characters and quote them so that the
53 string can be used in an XML text.
55 >>> sanitize_for_xml('this is \x1b')
56 'this is [NON-XML-CHAR-0x1B]'
58 bad_chars = re.compile(
"[\x00-\x08\x0b\x0c\x0e-\x1f\ud800-\udfff\ufffe\uffff]")
62 return "".join(
"[NON-XML-CHAR-0x%2X]" % ord(c)
for c
in match.group())
64 return bad_chars.sub(quote, data)
68 """helper to debug GAUDI-1084, dump the list of processes"""
69 from getpass
import getuser
71 if "WORKSPACE" in os.environ:
72 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
73 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
74 f.write(p.communicate()[0])
79 Send a signal to a process and all its child processes (starting from the
82 log = logging.getLogger(
"kill_tree")
83 ps_cmd = [
"ps",
"--no-headers",
"-o",
"pid",
"--ppid", str(ppid)]
86 get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
87 children =
map(int, get_children.communicate()[0].split())
88 for child
in children:
91 log.debug(
"killing process %d", ppid)
93 except OSError
as err:
96 log.debug(
"no such process %d", ppid)
103 _common_tmpdir =
None
133 logging.debug(
"running test %s", self.
name)
144 "TIMEOUT_DETAIL":
None,
150 r"from\s+Gaudi.Configuration\s+import\s+\*|"
151 r"from\s+Configurables\s+import",
154 suffix, lang =
".py",
"python"
156 suffix, lang =
".opts",
"c++"
157 self.
result[
"Options"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
158 lang, escape_for_html(self.
options)
160 optionFile = NamedTemporaryFile(suffix=suffix)
161 optionFile.file.write(self.
options.encode(
"utf-8"))
168 or platform.platform()
175 if re.search(prex, platform_id)
197 prog_ext = os.path.splitext(prog)[1]
198 if prog_ext
not in [
".exe",
".py",
".bat"]:
202 prog =
which(prog)
or prog
204 args = list(
map(RationalizePath, self.
args))
206 if prog_ext ==
".py":
216 "stdout": NamedTemporaryFile(),
217 "stderr": NamedTemporaryFile(),
222 logging.debug(
"executing %r in %s", params, workdir)
225 stdout=tmp_streams[
"stdout"],
226 stderr=tmp_streams[
"stderr"],
229 logging.debug(
"(pid: %d)", self.
proc.pid)
230 self.
proc.communicate()
231 tmp_streams[
"stdout"].seek(0)
233 tmp_streams[
"stdout"]
235 .decode(
"utf-8", errors=
"backslashreplace")
237 tmp_streams[
"stderr"].seek(0)
239 tmp_streams[
"stderr"]
241 .decode(
"utf-8", errors=
"backslashreplace")
244 thread = threading.Thread(target=target)
247 when_to_stop = datetime.now() + timedelta(seconds=self.
timeout)
248 too_big_stream =
None
250 datetime.now() < when_to_stop
251 and thread.is_alive()
252 and not too_big_stream
257 if thread.is_alive():
258 for stream
in tmp_streams:
259 if os.path.getsize(tmp_streams[stream].name) > OUTPUT_LIMIT:
260 too_big_stream = stream
262 if thread.is_alive():
263 if not too_big_stream:
265 "time out in test %s (pid %d)", self.
name, self.
proc.pid
273 "--eval-command=thread apply all backtrace",
275 gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
277 "utf-8", errors=
"backslashreplace"
279 self.
causes.append(
"timeout")
282 "too big %s detected (pid %d)", too_big_stream, self.
proc.pid
284 self.
result[f
"{too_big_stream} limit"] = str(OUTPUT_LIMIT)
285 self.
result[f
"{too_big_stream} size"] = str(
286 os.path.getsize(tmp_streams[too_big_stream].name)
288 self.
causes.append(f
"too big {too_big_stream}")
292 if thread.is_alive():
299 f
"completed test {self.name} with returncode = {self.returnedCode}"
301 logging.debug(
"validating test...")
302 val_start_time = time.perf_counter()
306 self.
validate_time = round(time.perf_counter() - val_start_time, 2)
308 logging.debug(f
"skipped test {self.name}")
313 shutil.rmtree(workdir,
True)
317 if self.
status !=
"skipped":
319 if self.
signal is not None:
321 self.
causes.append(
"exit code")
325 self.
causes.append(
"exit code")
328 self.
causes.append(
"exit code")
338 logging.debug(
"%s: %s", self.
name, self.
status)
340 "Exit Code":
"returnedCode",
343 "Runtime Environment":
"environment",
346 "Program Name":
"program",
348 "Validator":
"validator",
349 "Validation execution time":
"validate_time",
350 "Output Reference File":
"reference",
351 "Error Reference File":
"error_reference",
354 "Unsupported Platforms":
"unsupported_platforms",
355 "Stack Trace":
"stack_trace",
358 (key, getattr(self, attr))
359 for key, attr
in field_mapping.items()
360 if getattr(self, attr)
369 resultDict.extend(self.
result.annotations.items())
371 resultDict = dict(resultDict)
374 if "Validator" in resultDict:
375 resultDict[
"Validator"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
376 "python", escape_for_html(resultDict[
"Validator"])
387 elif stderr.strip() != self.
stderr.strip():
388 self.
causes.append(
"standard error")
389 return result, self.
causes
402 Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed via the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
405 if reference
is None:
414 reflines = list(filter(
None,
map(
lambda s: s.rstrip(), reference.splitlines())))
416 raise RuntimeError(
"Empty (or null) reference")
418 outlines = list(filter(
None,
map(
lambda s: s.rstrip(), stdout.splitlines())))
420 res_field =
"GaudiTest.RefBlock"
422 res_field +=
"_%s" % id
424 if signature
is None:
425 if signature_offset < 0:
426 signature_offset = len(reference) + signature_offset
427 signature = reflines[signature_offset]
430 pos = outlines.index(signature)
432 pos - signature_offset : pos + len(reflines) - signature_offset
434 if reflines != outlines:
435 msg =
"standard output"
438 if msg
not in causes:
440 result[res_field +
".observed"] = result.Quote(
"\n".join(outlines))
442 causes.append(
"missing signature")
443 result[res_field +
".signature"] = result.Quote(signature)
444 if len(reflines) > 1
or signature != reflines[0]:
445 result[res_field +
".expected"] = result.Quote(
"\n".join(reflines))
449 self, expected={"ERROR": 0,
"FATAL": 0}, stdout=
None, result=
None, causes=
None
452 Count the number of messages with required severity (by default ERROR and FATAL)
453 and check if their numbers match the expected ones (0 by default).
454 The dictionary "expected" can be used to tune the number of errors and fatals
455 allowed, or to limit the number of expected warnings etc.
470 outlines = stdout.splitlines()
471 from math
import log10
473 fmt =
"%%%dd - %%s" % (int(log10(len(outlines) + 1)))
479 if len(words) >= 2
and words[1]
in errors:
480 errors[words[1]].append(fmt % (linecount, l.rstrip()))
483 if len(errors[e]) != expected[e]:
484 causes.append(
"%s(%d)" % (e, len(errors[e])))
485 result[
"GaudiTest.lines.%s" % e] = result.Quote(
"\n".join(errors[e]))
486 result[
"GaudiTest.lines.%s.expected#" % e] = result.Quote(
498 ignore=r"Basket|.*size|Compression",
501 Compare the TTree summaries in stdout with the ones in trees_dict or in
502 the reference file. By default ignore the size, compression and basket
504 The presence of TTree summaries when none is expected is not a failure.
512 if trees_dict
is None:
515 if lreference
and os.path.isfile(lreference):
520 from pprint
import PrettyPrinter
524 result[
"GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
526 result[
"GaudiTest.TTrees.ignore"] = result.Quote(ignore)
531 causes.append(
"trees summaries")
533 result[
"GaudiTest.TTrees.failure_on"] = result.Quote(msg)
534 result[
"GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
539 self, stdout=None, result=None, causes=None, dict=None, ignore=None
542 Compare the TTree summaries in stdout with the ones in trees_dict or in
543 the reference file. By default ignore the size, compression and basket
545 The presence of TTree summaries when none is expected is not a failure.
557 if lreference
and os.path.isfile(lreference):
562 from pprint
import PrettyPrinter
566 result[
"GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
568 result[
"GaudiTest.Histos.ignore"] = result.Quote(ignore)
573 causes.append(
"histos summaries")
575 result[
"GaudiTest.Histos.failure_on"] = result.Quote(msg)
576 result[
"GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
581 self, stdout=None, stderr=None, result=None, causes=None, preproc=None
584 Default validation action: compare standard output and error to the
599 preproc = normalizeTestSuite
603 if lreference
and os.path.isfile(lreference):
605 lreference,
"standard output",
"Output Diff", preproc=preproc
608 causes += [
"missing reference file"]
612 if causes
and lreference:
615 newrefname =
".".join([lreference,
"new"])
616 while os.path.exists(newrefname):
618 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
619 newref = open(newrefname,
"w")
621 for l
in stdout.splitlines():
622 newref.write(l.rstrip() +
"\n")
624 result[
"New Output Reference File"] = os.path.relpath(
636 if os.path.isfile(lreference):
638 lreference,
"standard error",
"Error Diff", preproc=preproc
641 newcauses = [
"missing error reference file"]
643 if newcauses
and lreference:
645 newrefname =
".".join([lreference,
"new"])
646 while os.path.exists(newrefname):
648 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
649 newref = open(newrefname,
"w")
651 for l
in stderr.splitlines():
652 newref.write(l.rstrip() +
"\n")
654 result[
"New Error Reference File"] = os.path.relpath(
659 lreference,
"standard error",
"ExecTest.expected_stderr"
672 JSON validation action: compare json file to reference file
680 if not os.path.isfile(output_file):
681 causes.append(f
"output file {output_file} does not exist")
685 with open(output_file)
as f:
686 output = json.load(f)
687 except json.JSONDecodeError
as err:
688 causes.append(
"json parser error")
689 result[
"output_parse_error"] = f
"json parser error in {output_file}: {err}"
694 causes.append(
"reference file not set")
695 elif not os.path.isfile(lreference):
696 causes.append(
"reference file does not exist")
699 if causes
and lreference:
702 newrefname =
".".join([lreference,
"new"])
703 while os.path.exists(newrefname):
705 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
706 with open(newrefname,
"w")
as newref:
707 json.dump(output, newref, indent=4)
708 result[
"New JSON Output Reference File"] = os.path.relpath(
def platformSplit(p):
    """Split a platform id string on '-' and '+' separators.

    Returns the components as a set, e.g. 'x86_64-centos7-gcc9-opt'
    -> {'x86_64', 'centos7', 'gcc9', 'opt'}.
    """
    # Normalize both separators to '-' first, then a plain str.split
    # gives the same pieces as splitting on the character class [-+].
    normalized = p.replace("+", "-")
    return set(normalized.split("-"))
728 reference = os.path.normpath(
729 os.path.join(self.
basedir, os.path.expandvars(reffile))
733 spec_ref = reference[:-3] +
GetPlatform(self)[0:3] + reference[-3:]
734 if os.path.isfile(spec_ref):
738 dirname, basename = os.path.split(reference)
741 head = basename +
"."
744 if "do0" in platform:
747 for f
in os.listdir(dirname):
748 if f.startswith(head):
749 req_plat = platformSplit(f[head_len:])
750 if platform.issuperset(req_plat):
751 candidates.append((len(req_plat), f))
756 reference = os.path.join(dirname, candidates[-1][1])
766 from GaudiKernel
import ROOT6WorkAroundEnabled
779 Function used to normalize the used path
781 newPath = os.path.normpath(os.path.expandvars(p))
782 if os.path.exists(newPath):
783 p = os.path.realpath(newPath)
789 Locates an executable in the executables path ($PATH) and returns the full
790 path to it. An application is looked for with or without the '.exe' suffix.
791 If the executable cannot be found, None is returned
793 if os.path.isabs(executable):
794 if not os.path.isfile(executable):
795 if executable.endswith(
".exe"):
796 if os.path.isfile(executable[:-4]):
797 return executable[:-4]
799 executable = os.path.split(executable)[1]
802 for d
in os.environ.get(
"PATH").split(os.pathsep):
803 fullpath = os.path.join(d, executable)
804 if os.path.isfile(fullpath):
806 elif executable.endswith(
".exe")
and os.path.isfile(fullpath[:-4]):
820 UNTESTED =
"UNTESTED"
830 def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
834 assert isinstance(key, str)
838 assert isinstance(key, str)
839 assert isinstance(value, str),
"{!r} is not a string".
format(value)
844 Convert text to html by escaping special chars and adding <pre> tags.
846 return "<pre>{}</pre>".
format(escape_for_html(text))
865 """Validate the output of the program.
866 'stdout' -- A string containing the data written to the standard output
868 'stderr' -- A string containing the data written to the standard error
870 'result' -- A 'Result' object. It may be used to annotate
871 the outcome according to the content of stderr.
872 returns -- A list of strings giving causes of failure."""
877 causes.append(self.
cause)
883 """Compare 's1' and 's2', ignoring line endings.
886 returns -- True if 's1' and 's2' are the same, ignoring
887 differences in line endings."""
891 to_ignore = re.compile(
892 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
896 return not to_ignore.match(l)
898 return list(filter(keep_line, s1.splitlines())) == list(
899 filter(keep_line, s2.splitlines())
902 return s1.splitlines() == s2.splitlines()
907 """Base class for a callable that takes a file and returns a modified
922 if not isinstance(input, str):
926 lines = input.splitlines()
930 output =
"\n".join(output)
960 if line.find(s) >= 0:
975 if self.
start in line:
978 elif self.
end in line:
988 when = re.compile(when)
992 if isinstance(rhs, RegexpReplacer):
994 res._operations = self.
_operations + rhs._operations
996 res = FilePreprocessor.__add__(self, rhs)
1001 if w
is None or w.search(line):
1002 line = o.sub(r, line)
1009 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
1010 "00:00:00 1970-01-01",
1013 normalizeEOL.__processLine__ =
lambda line: str(line).rstrip() +
"\n"
1017 skipEmptyLines.__processLine__ =
lambda line: (line.strip()
and line)
or None
1031 line = line[: (pos + self.
siglen)]
1032 lst = line[(pos + self.
siglen) :].split()
1034 line +=
" ".join(lst)
1040 Sort group of lines matching a regular expression
1044 self.
exp = exp
if hasattr(exp,
"match")
else re.compile(exp)
1047 match = self.
exp.match
1056 output.extend(group)
1063 normalizeTestSuite = maskPointers + normalizeDate
1065 (
"TIMER",
r"\s+[+-]?[0-9]+[0-9.e+-]*",
" 0"),
1066 (
"release all pending",
r"^.*/([^/]*:.*)",
r"\1"),
1067 (
"^#.*file",
r"file '.*[/\\]([^/\\]*)$",
r"file '\1"),
1069 "^JobOptionsSvc.*options successfully read in from",
1070 r"read in from .*[/\\]([^/\\]*)$",
1076 r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1077 "00000000-0000-0000-0000-000000000000",
1081 "ServiceLocatorHelper::",
1082 "ServiceLocatorHelper::(create|locate)Service",
1083 "ServiceLocatorHelper::service",
1086 (
None,
r"e([-+])0([0-9][0-9])",
r"e\1\2"),
1088 (
None,
r"Service reference count check:",
r"Looping over all active services..."),
1092 r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1095 (
"ApplicationMgr",
r"(declareMultiSvcType|addMultiSvc): ",
""),
1096 (
r"Property \['Name': Value\]",
r"( = '[^']+':)'(.*)'",
r"\1\2"),
1097 (
"TimelineSvc",
"to file 'TimelineFile':",
"to file "),
1098 (
"DataObjectHandleBase",
r'DataObjectHandleBase\("([^"]*)"\)',
r"'\1'"),
1101 "Added successfully Conversion service:",
1102 "Added successfully Conversion service:",
1103 "Added successfully Conversion service ",
1111 "JobOptionsSvc INFO # ",
1112 "JobOptionsSvc WARNING # ",
1115 "This machine has a speed",
1117 "ToolSvc.Sequenc... INFO",
1118 "DataListenerSvc INFO XML written to file:",
1121 "DEBUG No writable file catalog found which contains FID:",
1122 "DEBUG Service base class initialized successfully",
1124 "DEBUG Incident timing:",
1128 "INFO 'CnvServices':[",
1130 "DEBUG 'CnvServices':[",
1135 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1137 "mismatching case for property name:",
1139 "Histograms saving not required.",
1141 "Properties are dumped into",
1143 "WARNING no ROOT output file name",
1144 "INFO Writing ROOT histograms to:",
1145 "INFO Completed update of ROOT histograms in:",
1148 "data dependencies:",
1151 r"^JobOptionsSvc INFO *$",
1154 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1155 r"File '.*.xml' does not exist",
1156 r"INFO Refer to dataset .* by its file ID:",
1157 r"INFO Referring to dataset .* by its file ID:",
1158 r"INFO Disconnect from dataset",
1159 r"INFO Disconnected from dataset",
1160 r"INFO Disconnected data IO:",
1161 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1163 r".*StatusCodeSvc.*",
1164 r".*StatusCodeCheck.*",
1165 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1168 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1170 r"^ +[0-9]+ \|.*ROOT",
1171 r"^ +[0-9]+ \|.*\|.*Dict",
1173 r"EventLoopMgr.*---> Loop Finished",
1174 r"HiveSlimEventLo.*---> Loop Finished",
1179 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1183 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1184 r"Property(.*)'Audit(Begin|End)Run':",
1186 r"Property(.*)'AuditRe(start|initialize)':",
1187 r"Property(.*)'Blocking':",
1189 r"Property(.*)'ErrorCount(er)?':",
1191 r"Property(.*)'Sequential':",
1193 r"Property(.*)'FilterCircularDependencies':",
1195 r"Property(.*)'IsClonable':",
1197 r"Property update for OutputLevel : new value =",
1198 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1207 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1211 normalizeTestSuite = (
1213 + normalizeTestSuite
1220 normalizeExamples = normalizeTestSuite
1226 def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite):
1234 if os.path.isfile(self.
reffile):
1235 orig = open(self.
reffile).readlines()
1238 result[self.
result_key +
".preproc.orig"] = result.Quote(
1239 "\n".join(
map(str.strip, orig))
1243 new = stdout.splitlines()
1249 difflib.unified_diff(
1250 [l.rstrip()
for l
in orig],
1251 [l.rstrip()
for l
in new],
1253 fromfile=
"Reference file",
1254 tofile=
"Actual output",
1259 result[self.
result_key] = result.Quote(
"\n".join(filterdiffs))
1260 result[self.
result_key +
".preproc.new"] = result.Quote(
1261 "\n".join(
map(str.strip, new))
1263 causes.append(self.
cause)
1269 Scan stdout to find ROOT TTree summaries and digest them.
1271 stars = re.compile(
r"^\*+$")
1272 outlines = stdout.splitlines()
1273 nlines = len(outlines)
1279 while i < nlines
and not stars.match(outlines[i]):
1284 trees[tree[
"Name"]] = tree
1291 Check that all the keys in reference are in to_check too, with the same value.
1292 If the value is a dict, the function is called recursively. to_check can
1293 contain more keys than reference, that will not be tested.
1294 The function returns at the first difference found.
1299 ignore_re = re.compile(ignore)
1300 keys = [key
for key
in reference
if not ignore_re.match(key)]
1302 keys = reference.keys()
1306 if isinstance(reference[k], dict)
and isinstance(to_check[k], dict):
1309 failed = fail_keys =
cmpTreesDicts(reference[k], to_check[k], ignore)
1312 failed = to_check[k] != reference[k]
1317 fail_keys.insert(0, k)
1328 if c
is None or r
is None:
1330 return (fail_path, r, c)
1334 h_count_re = re.compile(
r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1339 Parse the TTree summary table in lines, starting from pos.
1340 Returns a tuple with the dictionary with the digested information and the
1341 position of the first line after the summary.
1348 return [f.strip()
for f
in l.strip(
"*\n").split(
":", 2)]
1353 cols = splitcols(ll[0])
1357 r[
"Name"], r[
"Title"] = cols[1:]
1362 r[
"Title"] = ll[1].strip(
"*\n").split(
"|")[1].strip()
1366 cols = splitcols(ll[1 + delta_i])
1367 r[
"Entries"] = int(cols[1])
1369 sizes = cols[2].split()
1370 r[
"Total size"] = int(sizes[2])
1371 if sizes[-1] ==
"memory":
1374 r[
"File size"] = int(sizes[-1])
1376 cols = splitcols(ll[2 + delta_i])
1377 sizes = cols[2].split()
1378 if cols[0] ==
"Baskets":
1379 r[
"Baskets"] = int(cols[1])
1380 r[
"Basket size"] = int(sizes[2])
1381 r[
"Compression"] = float(sizes[-1])
1385 def nextblock(lines, i):
1387 dots = re.compile(
r"^\.+$")
1388 stars = re.compile(
r"^\*+$")
1392 and not dots.match(lines[i + delta_i][1:-1])
1393 and not stars.match(lines[i + delta_i])
1398 if i < (count - 3)
and lines[i].startswith(
"*Tree"):
1399 i_nextblock = nextblock(lines, i)
1400 result = parseblock(lines[i:i_nextblock])
1401 result[
"Branches"] = {}
1403 while i < (count - 3)
and lines[i].startswith(
"*Br"):
1404 if i < (count - 2)
and lines[i].startswith(
"*Branch "):
1408 i_nextblock = nextblock(lines, i)
1409 if i_nextblock >= count:
1411 branch = parseblock(lines[i:i_nextblock])
1412 result[
"Branches"][branch[
"Name"]] = branch
1420 Extract the histograms infos from the lines starting at pos.
1421 Returns the position of the first line after the summary block.
1424 h_table_head = re.compile(
1425 r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1427 h_short_summ = re.compile(
r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1432 m = h_count_re.search(lines[pos])
1433 name = m.group(1).strip()
1434 total = int(m.group(2))
1436 for k, v
in [x.split(
"=")
for x
in m.group(3).split()]:
1439 header[
"Total"] = total
1443 m = h_table_head.search(lines[pos])
1446 t = t.replace(
" profile",
"Prof")
1453 if l.startswith(
" | ID"):
1455 titles = [x.strip()
for x
in l.split(
"|")][1:]
1457 while pos < nlines
and lines[pos].startswith(
" |"):
1459 values = [x.strip()
for x
in l.split(
"|")][1:]
1461 for i
in range(len(titles)):
1462 hcont[titles[i]] = values[i]
1463 cont[hcont[
"ID"]] = hcont
1465 elif l.startswith(
" ID="):
1466 while pos < nlines
and lines[pos].startswith(
" ID="):
1468 x.strip()
for x
in h_short_summ.search(lines[pos]).groups()
1470 cont[values[0]] = values
1473 raise RuntimeError(
"Cannot understand line %d: '%s'" % (pos, l))
1477 summ[d][
"header"] = header
1482 summ[name] = {
"header": header}
1488 Scan stdout to find ROOT TTree summaries and digest them.
1490 outlines = stdout.splitlines()
1491 nlines = len(outlines) - 1
1499 match = h_count_re.search(outlines[pos])
1500 while pos < nlines
and not match:
1502 match = h_count_re.search(outlines[pos])
1505 summaries.update(summ)
1511 Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1515 if "BINARY_TAG" in os.environ:
1516 arch = os.environ[
"BINARY_TAG"]
1517 elif "CMTCONFIG" in os.environ:
1518 arch = os.environ[
"CMTCONFIG"]
1519 elif "SCRAM_ARCH" in os.environ:
1520 arch = os.environ[
"SCRAM_ARCH"]
1521 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1527 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1539 Return True if the current platform is Windows.
1541 This function was needed because of the change in the CMTCONFIG format,
1542 from win32_vc71_dbg to i686-winxp-vc9-dbg.
1545 return "winxp" in platform
or platform.startswith(
"win")
1550 """Validate JSON output.
1551 returns -- A list of strings giving causes of failure."""
1555 with open(ref)
as f:
1556 expected = json.load(f)
1557 except json.JSONDecodeError
as err:
1558 causes.append(
"json parser error")
1559 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1564 causes.append(
"json content")
1565 result[
"json_diff"] =
"detailed diff was turned off"
1573 expected = sorted(expected, key=
lambda item: (item[
"component"], item[
"name"]))
1574 out = sorted(out, key=
lambda item: (item[
"component"], item[
"name"]))
1576 t.assertEqual(expected, out)
1577 except AssertionError
as err:
1578 causes.append(
"json content")
1579 result[
"json_diff"] = str(err).splitlines()[0]