from datetime import datetime, timedelta
from html import escape as escape_for_html
from subprocess import PIPE, STDOUT, Popen
from tempfile import NamedTemporaryFile, mkdtemp
from unittest import TestCase
if sys.version_info < (3, 5):
    # emulate the Python >= 3.5 "backslashreplace" decode error handler
    from codecs import backslashreplace_errors, register_error

    def _new_backslashreplace_errors(exc):
        if isinstance(exc, UnicodeDecodeError):
            code = hex(ord(exc.object[exc.start]))
            return ("\\" + code[1:], exc.start + 1)
        return backslashreplace_errors(exc)

    register_error("backslashreplace", _new_backslashreplace_errors)
    del backslashreplace_errors
    del _new_backslashreplace_errors
OUTPUT_LIMIT = int(os.environ.get("GAUDI_TEST_STDOUT_LIMIT", 100 * 1024**2))
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1f\ud800-\udfff\ufffe\uffff]")

    def quote(match):
        return "".join("[NON-XML-CHAR-0x%2X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
68 """helper to debug GAUDI-1084, dump the list of processes"""
69 from getpass
import getuser
71 if "WORKSPACE" in os.environ:
72 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
73 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
74 f.write(p.communicate()[0])
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).
    """
    log = logging.getLogger("kill_tree")
    ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
    # run ps in a clean environment
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
    children = map(int, get_children.communicate()[0].split())
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != 3:  # ESRCH: no such process
            raise
        log.debug("no such process %d", ppid)
    _common_tmpdir = None
    def run(self):
        logging.debug("running test %s", self.name)
        self.result = Result(
            {
                "TIMEOUT_DETAIL": None,
            }
        )
150 r"from\s+Gaudi.Configuration\s+import\s+\*|"
151 r"from\s+Configurables\s+import",
154 suffix, lang =
".py",
"python"
156 suffix, lang =
".opts",
"c++"
157 self.
result[
"Options"] = (
158 '<pre><code class="language-{}">{}</code></pre>'.
format(
159 lang, escape_for_html(self.
options)
162 optionFile = NamedTemporaryFile(suffix=suffix)
163 optionFile.file.write(self.
options.encode(
"utf-8"))
        platform_id = os.environ.get("BINARY_TAG") or platform.platform()
        # skip the test if the platform matches any unsupported-platform regexp
        if any(re.search(prex, platform_id) for prex in self.unsupported_platforms):
            self.status = "skipped"
            logging.debug(f"skipped test {self.name}")
            return
        prog_ext = os.path.splitext(prog)[1]
        if prog_ext not in [".exe", ".py", ".bat"]:
            prog += ".exe"
            prog_ext = ".exe"

        prog = which(prog) or prog
        args = list(map(RationalizePath, self.args))
        if prog_ext == ".py":
            # run Python scripts through the interpreter
            params = ["python", RationalizePath(prog)] + args
        else:
            params = [RationalizePath(prog)] + args
218 "stdout": NamedTemporaryFile(),
219 "stderr": NamedTemporaryFile(),
        def target():
            logging.debug("executing %r in %s", params, workdir)
            self.proc = Popen(
                params,
                stdout=tmp_streams["stdout"],
                stderr=tmp_streams["stderr"],
                env=self.environment,
                cwd=workdir,
            )
            logging.debug("(pid: %d)", self.proc.pid)
            self.proc.communicate()
            # rewind and decode the captured streams
            tmp_streams["stdout"].seek(0)
            self.out = (
                tmp_streams["stdout"].read().decode("utf-8", errors="backslashreplace")
            )
            tmp_streams["stderr"].seek(0)
            self.err = (
                tmp_streams["stderr"].read().decode("utf-8", errors="backslashreplace")
            )
        thread = threading.Thread(target=target)
        thread.start()

        # wait for the process to finish, stopping earlier on timeout or if an
        # output stream grows beyond OUTPUT_LIMIT
        when_to_stop = datetime.now() + timedelta(seconds=self.timeout)
        too_big_stream = None
        while (
            datetime.now() < when_to_stop
            and thread.is_alive()
            and not too_big_stream
        ):
            thread.join(1)
            if thread.is_alive():
                for stream in tmp_streams:
                    if os.path.getsize(tmp_streams[stream].name) > OUTPUT_LIMIT:
                        too_big_stream = stream
        if thread.is_alive():
            if not too_big_stream:
                logging.debug(
                    "time out in test %s (pid %d)", self.name, self.proc.pid
                )
                # attach gdb to the stuck process to record a stack trace
                cmd = [
                    "gdb",
                    "--pid",
                    str(self.proc.pid),
                    "--batch",
                    "--eval-command=thread apply all backtrace",
                ]
                gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                self.stack_trace = gdb.communicate()[0].decode(
                    "utf-8", errors="backslashreplace"
                )
                self.causes.append("timeout")
            else:
                logging.debug(
                    "too big %s detected (pid %d)", too_big_stream, self.proc.pid
                )
                self.result[f"{too_big_stream} limit"] = str(OUTPUT_LIMIT)
                self.result[f"{too_big_stream} size"] = str(
                    os.path.getsize(tmp_streams[too_big_stream].name)
                )
                self.causes.append(f"too big {too_big_stream}")
        if thread.is_alive():
            # still running after the timeout handling: kill the process tree
            kill_tree(self.proc.pid, signal.SIGKILL)
            thread.join()
        logging.debug(
            f"completed test {self.name} with returncode = {self.returnedCode}"
        )
        logging.debug("validating test...")
        val_start_time = time.perf_counter()
        self.result, self.causes = self.ValidateOutput(
            stdout=self.out, stderr=self.err, result=self.result
        )
        self.validate_time = round(time.perf_counter() - val_start_time, 2)
        shutil.rmtree(workdir, True)  # ignore_errors=True
        if self.status != "skipped":
            # a bad exit status is a cause of failure, whether the process was
            # killed by a signal or returned a non-zero code
            if self.signal is not None:
                self.causes.append("exit code")
            elif self.returnedCode != 0:
                self.causes.append("exit code")
            self.status = "failed" if self.causes else "passed"
        logging.debug("%s: %s", self.name, self.status)
342 "Exit Code":
"returnedCode",
345 "Runtime Environment":
"environment",
348 "Program Name":
"program",
350 "Validator":
"validator",
351 "Validation execution time":
"validate_time",
352 "Output Reference File":
"reference",
353 "Error Reference File":
"error_reference",
356 "Unsupported Platforms":
"unsupported_platforms",
357 "Stack Trace":
"stack_trace",
360 (key, getattr(self, attr))
361 for key, attr
in field_mapping.items()
362 if getattr(self, attr)
371 resultDict.extend(self.
result.annotations.items())
373 resultDict = dict(resultDict)
376 if "Validator" in resultDict:
377 resultDict[
"Validator"] = (
378 '<pre><code class="language-{}">{}</code></pre>'.
format(
379 "python", escape_for_html(resultDict[
"Validator"])
        elif stderr.strip() != self.stderr.strip():
            self.causes.append("standard error")
        return result, self.causes
    def findReferenceBlock(
        self,
        reference=None,
        stdout=None,
        result=None,
        causes=None,
        signature_offset=0,
        signature=None,
        id=None,
    ):
        """
        Given a block of text, tries to find it in the output. The block has to
        be identified by a signature line. By default the first line is used as
        the signature, or the line pointed to by signature_offset. If
        signature_offset points outside the block, a signature line can be
        passed via the signature argument. Note: if 'signature' is None (the
        default), a negative signature_offset is interpreted as an index in a
        list (e.g. -1 means the last line), otherwise it is interpreted as the
        number of lines before the first line of the block at which the
        signature must appear. The parameter 'id' allows to distinguish between
        different calls to this function in the same validation code.
        """
        if reference is None:
            reference = self.reference
        reflines = list(
            filter(None, map(lambda s: s.rstrip(), reference.splitlines()))
        )
        if not reflines:
            raise RuntimeError("Empty (or null) reference")
        # the same on standard output
        outlines = list(
            filter(None, map(lambda s: s.rstrip(), stdout.splitlines()))
        )
        res_field = "GaudiTest.RefBlock"
        if id:
            res_field += "_%s" % id
        if signature is None:
            if signature_offset < 0:
                # negative offsets are relative to the end of the reference lines
                signature_offset = len(reflines) + signature_offset
            signature = reflines[signature_offset]
        # find the reference block in the output file
        try:
            pos = outlines.index(signature)
            outlines = outlines[
                pos - signature_offset : pos + len(reflines) - signature_offset
            ]
            if reflines != outlines:
                msg = "standard output"
                # avoid a duplicate message if the function is called twice
                if msg not in causes:
                    causes.append(msg)
                    result[res_field + ".observed"] = result.Quote(
                        "\n".join(outlines)
                    )
        except ValueError:
            causes.append("missing signature")
        result[res_field + ".signature"] = result.Quote(signature)
        if len(reflines) > 1 or signature != reflines[0]:
            result[res_field + ".expected"] = result.Quote("\n".join(reflines))
        return causes
    def countErrorLines(
        self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
    ):
        """
        Count the number of messages with required severity (by default ERROR and FATAL)
        and check if their numbers match the expected ones (0 by default).
        The dictionary "expected" can be used to tune the number of errors and fatals
        allowed, or to limit the number of expected warnings, etc.
        """
        outlines = stdout.splitlines()
        from math import log10

        fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))

        # collect the lines with the requested severities
        errors = dict((sev, []) for sev in expected)
        linecount = 0
        for l in outlines:
            linecount += 1
            words = l.split()
            if len(words) >= 2 and words[1] in errors:
                errors[words[1]].append(fmt % (linecount, l.rstrip()))

        for e in errors:
            if len(errors[e]) != expected[e]:
                causes.append("%s(%d)" % (e, len(errors[e])))
                result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
                result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
                    str(expected[e])
                )

        return causes
    def CheckTTreesSummaries(
        self,
        stdout=None,
        result=None,
        causes=None,
        trees_dict=None,
        ignore=r"Basket|.*size|Compression",
    ):
        """
        Compare the TTree summaries in stdout with the ones in trees_dict or in
        the reference file. By default ignore the size, compression and basket
        count.
        The presence of TTree summaries when none is expected is not a failure.
        """
        if trees_dict is None:
            lreference = self._expandReferenceFileName(self.reference)
            # if the reference file exists, take the expected trees from it
            if lreference and os.path.isfile(lreference):
                trees_dict = findTTreeSummaries(open(lreference).read())
            else:
                trees_dict = {}

        from pprint import PrettyPrinter

        pp = PrettyPrinter()
        if trees_dict:
            result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
            if ignore:
                result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)

        trees = findTTreeSummaries(stdout)
        failed = cmpTreesDicts(trees_dict, trees, ignore)
        if failed:
            causes.append("trees summaries")
            msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
            result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
            result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))

        return causes
    def CheckHistosSummaries(
        self, stdout=None, result=None, causes=None, dict=None, ignore=None
    ):
        """
        Compare the histogram summaries in stdout with the ones in dict or in
        the reference file. By default ignore the size, compression and basket
        count.
        The presence of histogram summaries when none is expected is not a failure.
        """
        if dict is None:
            lreference = self._expandReferenceFileName(self.reference)
            # if the reference file exists, take the expected histograms from it
            if lreference and os.path.isfile(lreference):
                dict = findHistosSummaries(open(lreference).read())
            else:
                dict = {}

        from pprint import PrettyPrinter

        pp = PrettyPrinter()
        if dict:
            result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
            if ignore:
                result["GaudiTest.Histos.ignore"] = result.Quote(ignore)

        histos = findHistosSummaries(stdout)
        failed = cmpTreesDicts(dict, histos, ignore)
        if failed:
            causes.append("histos summaries")
            msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
            result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
            result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))

        return causes
    def validateWithReference(
        self, stdout=None, stderr=None, result=None, causes=None, preproc=None
    ):
        """
        Default validation action: compare standard output and error to the
        reference files.
        """
        if preproc is None:
            preproc = normalizeTestSuite
        # check standard output
        lreference = self._expandReferenceFileName(self.reference)
        if lreference and os.path.isfile(lreference):
            causes += ReferenceFileValidator(
                lreference, "standard output", "Output Diff", preproc=preproc
            )(stdout, result)
        elif lreference:
            causes += ["missing reference file"]
        # write a new reference file for stdout if the comparison failed
        if causes and lreference:
            cnt = 0
            newrefname = ".".join([lreference, "new"])
            while os.path.exists(newrefname):
                cnt += 1
                newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
            newref = open(newrefname, "w")
            # sanitize line endings
            for l in stdout.splitlines():
                newref.write(l.rstrip() + "\n")
            del newref  # flush and close
            result["New Output Reference File"] = os.path.relpath(
                newrefname, self.basedir
            )
        # check standard error
        lreference = self._expandReferenceFileName(self.error_reference)
        if lreference:
            if os.path.isfile(lreference):
                newcauses = ReferenceFileValidator(
                    lreference, "standard error", "Error Diff", preproc=preproc
                )(stderr, result)
            else:
                newcauses = ["missing error reference file"]
            causes += newcauses
            # write a new reference file for stderr if the comparison failed
            if newcauses and lreference:
                cnt = 0
                newrefname = ".".join([lreference, "new"])
                while os.path.exists(newrefname):
                    cnt += 1
                    newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
                newref = open(newrefname, "w")
                for l in stderr.splitlines():
                    newref.write(l.rstrip() + "\n")
                del newref  # flush and close
                result["New Error Reference File"] = os.path.relpath(
                    newrefname, self.basedir
                )
        else:
            causes += BasicOutputValidator(
                lreference, "standard error", "ExecTest.expected_stderr"
            )(stderr, result)
    def validateJSONWithReference(
        self, output_file, reference_file, result=None, causes=None, detailed=True
    ):
        """
        JSON validation action: compare a JSON file to a reference file.
        """
        if not os.path.isfile(output_file):
            causes.append(f"output file {output_file} does not exist")
            return causes
        try:
            with open(output_file) as f:
                output = json.load(f)
        except json.JSONDecodeError as err:
            causes.append("json parser error")
            result["output_parse_error"] = f"json parser error in {output_file}: {err}"
            return causes
        lreference = self._expandReferenceFileName(reference_file)
        if not lreference:
            causes.append("reference file not set")
        elif not os.path.isfile(lreference):
            causes.append("reference file does not exist")
        else:
            causes += JSONOutputValidator()(lreference, output, result, detailed)
        # write a new reference file if the comparison failed
        if causes and lreference:
            cnt = 0
            newrefname = ".".join([lreference, "new"])
            while os.path.exists(newrefname):
                cnt += 1
                newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
            with open(newrefname, "w") as newref:
                json.dump(output, newref, indent=4)
            result["New JSON Output Reference File"] = os.path.relpath(
                newrefname, self.basedir
            )
        return causes
    def _expandReferenceFileName(self, reffile):
        if not reffile:
            return ""

        def platformSplit(p):
            return set(re.split(r"[-+]", p))

        reference = os.path.normpath(
            os.path.join(self.basedir, os.path.expandvars(reffile))
        )

        # old-style platform-specific reference name
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:  # look for new-style platform-specific reference files
            dirname, basename = os.path.split(reference)
            if not dirname:
                dirname = "."
            head = basename + "."
            head_len = len(head)
            platform = platformSplit(GetPlatform(self))
            if "do0" in platform:
                platform.add("dbg")
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:
                # take the one with the highest match
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
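    # Example (hypothetical file names): with platform
    # "x86_64-centos7-gcc11-opt", a reference "ref.out" can be overridden by
    # "ref.out.x86_64-centos7-gcc11-opt"; the suffix is matched token by token
    # through platformSplit(), so partial specs like "ref.out.dbg" also work.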
from GaudiKernel import ROOT6WorkAroundEnabled
def RationalizePath(p):
    """
    Function used to normalize the used path
    """
    p = os.path.expandvars(p)
    suffix = ""
    if not os.path.exists(p) and ":" in p:
        # keep a trailing ":<fragment>" (e.g. a line number) out of the
        # file-system lookup
        p, suffix = p.rsplit(":", 1)
        suffix = f":{suffix}"
    if os.path.exists(p):
        p = os.path.realpath(p)
    return p + suffix
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe' suffix.
    If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                if os.path.isfile(executable[:-4]):
                    return executable[:-4]
            # fall back to searching for the bare name in $PATH
            executable = os.path.split(executable)[1]
        else:
            return executable
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            return fullpath[:-4]
    return None
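# Usage sketch (paths are illustrative only):
#
#   which("python")     # -> "/usr/bin/python" if found on $PATH
#   which("Gaudi.exe")  # falls back to "Gaudi" when the .exe variant is absent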
class Result:
    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"
    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert isinstance(key, str)
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert isinstance(key, str)
        assert isinstance(value, str), "{!r} is not a string".format(value)
        self.annotations[key] = value
    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return "<pre>{}</pre>".format(escape_for_html(text))
876 """Validate the output of the program.
877 'stdout' -- A string containing the data written to the standard output
879 'stderr' -- A string containing the data written to the standard error
881 'result' -- A 'Result' object. It may be used to annotate
882 the outcome according to the content of stderr.
883 returns -- A list of strings giving causes of failure."""
888 causes.append(self.
cause)
894 """Compare 's1' and 's2', ignoring line endings.
897 returns -- True if 's1' and 's2' are the same, ignoring
898 differences in line endings."""
902 to_ignore = re.compile(
903 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
907 return not to_ignore.match(l)
909 return list(filter(keep_line, s1.splitlines())) == list(
910 filter(keep_line, s2.splitlines())
913 return s1.splitlines() == s2.splitlines()
918 """Base class for a callable that takes a file and returns a modified
933 if not isinstance(input, str):
937 lines = input.splitlines()
941 output =
"\n".join(output)
        for s in self.strings:
            if line.find(s) >= 0:
                return None
        if self.start in line:
            self._skipping = True
            return None
        elif self.end in line:
            self._skipping = False
        elif self._skipping:
            return None
        return line
    def __init__(self, orig, repl="", when=None):
        if when:
            when = re.compile(when)
        self._operations = [(when, re.compile(orig), repl)]
    def __add__(self, rhs):
        if isinstance(rhs, RegexpReplacer):
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res
    def __processLine__(self, line):
        for w, o, r in self._operations:
            if w is None or w.search(line):
                line = o.sub(r, line)
        return line
1020 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
1021 "00:00:00 1970-01-01",
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"
skipEmptyLines = FilePreprocessor()
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
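# Preprocessors compose with "+" (see the __add__ overloads above); a sketch:
#
#   myPreproc = maskPointers + normalizeDate + skipEmptyLines + normalizeEOL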
            # sort the tail of the line that follows the signature
            lst = line[(pos + self.siglen) :].split()
            lst.sort()
            line = line[: (pos + self.siglen)] + " ".join(lst)
class SortGroupOfLines(FilePreprocessor):
    """
    Sort a group of lines matching a regular expression
    """

    def __init__(self, exp):
        self.exp = exp if hasattr(exp, "match") else re.compile(exp)

    def __processFile__(self, lines):
        match = self.exp.match
        output = []
        group = []
        for l in lines:
            if match(l):
                group.append(l)
            else:
                if group:
                    group.sort()
                    output.extend(group)
                    group = []
                output.append(l)
        if group:
            group.sort()
            output.extend(group)
        return output
normalizeTestSuite = maskPointers + normalizeDate
for w, o, r in [
    ("TIMER", r"\s+[+-]?[0-9]+[0-9.e+-]*", " 0"),  # normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"read in from \1",
    ),  # normalize the path to options
    # normalize UUIDs, except those ending in all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # ignore the count of declared properties
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property \['Name': Value\]", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
    (
        "Added successfully Conversion service:",
        "Added successfully Conversion service:",
        "Added successfully Conversion service ",
    ),
]:
    normalizeTestSuite += RegexpReplacer(o, r, w)
1122 "JobOptionsSvc INFO # ",
1123 "JobOptionsSvc WARNING # ",
1126 "This machine has a speed",
1128 "ToolSvc.Sequenc... INFO",
1129 "DataListenerSvc INFO XML written to file:",
1132 "DEBUG No writable file catalog found which contains FID:",
1133 "DEBUG Service base class initialized successfully",
1135 "DEBUG Incident timing:",
1139 "INFO 'CnvServices':[",
1141 "DEBUG 'CnvServices':[",
1146 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1148 "mismatching case for property name:",
1150 "Histograms saving not required.",
1152 "Properties are dumped into",
1154 "WARNING no ROOT output file name",
1155 "INFO Writing ROOT histograms to:",
1156 "INFO Completed update of ROOT histograms in:",
1159 "data dependencies:",
1162 r"^JobOptionsSvc INFO *$",
1165 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1166 r"File '.*.xml' does not exist",
1167 r"INFO Refer to dataset .* by its file ID:",
1168 r"INFO Referring to dataset .* by its file ID:",
1169 r"INFO Disconnect from dataset",
1170 r"INFO Disconnected from dataset",
1171 r"INFO Disconnected data IO:",
1172 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1174 r".*StatusCodeSvc.*",
1175 r".*StatusCodeCheck.*",
1176 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1179 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1181 r"^ +[0-9]+ \|.*ROOT",
1182 r"^ +[0-9]+ \|.*\|.*Dict",
1184 r"EventLoopMgr.*---> Loop Finished",
1185 r"HiveSlimEventLo.*---> Loop Finished",
1190 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1194 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1195 r"Property(.*)'Audit(Begin|End)Run':",
1197 r"Property(.*)'AuditRe(start|initialize)':",
1198 r"Property(.*)'Blocking':",
1200 r"Property(.*)'ErrorCount(er)?':",
1202 r"Property(.*)'Sequential':",
1204 r"Property(.*)'FilterCircularDependencies':",
1206 r"Property(.*)'IsClonable':",
1208 r"Property update for OutputLevel : new value =",
1209 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1210 r".*StalledEventMonitoring.*",
1219 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1223 normalizeTestSuite = (
1225 + normalizeTestSuite
normalizeExamples = normalizeTestSuite
class ReferenceFileValidator:
    def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite):
        self.reffile = os.path.expandvars(reffile)
        self.cause = cause
        self.result_key = result_key
        self.preproc = preproc

    def __call__(self, stdout, result):
        causes = []
        if os.path.isfile(self.reffile):
            orig = open(self.reffile).readlines()
            if self.preproc:
                orig = self.preproc(orig)
            result[self.result_key + ".preproc.orig"] = result.Quote(
                "\n".join(map(str.strip, orig))
            )
        else:
            orig = []

        new = stdout.splitlines()
        if self.preproc:
            new = self.preproc(new)

        filterdiffs = list(
            difflib.unified_diff(
                [l.rstrip() for l in orig],
                [l.rstrip() for l in new],
                n=1,
                fromfile="Reference file",
                tofile="Actual output",
            )
        )
        if filterdiffs:
            result[self.result_key] = result.Quote("\n".join(filterdiffs))
            result[self.result_key + ".preproc.new"] = result.Quote(
                "\n".join(map(str.strip, new))
            )
            causes.append(self.cause)
        return causes
def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.
    """
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:
        # look for the start of a summary block (a line of '*')
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, which will not be tested.
    The function returns at the first difference found.
    """
    fail_keys = []
    # filter the keys in the reference dictionary
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    # loop over the non-ignored keys of the reference dictionary
    for k in keys:
        if k in to_check:
            if isinstance(reference[k], dict) and isinstance(to_check[k], dict):
                # when both values are dictionaries, recurse
                failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
            else:
                failed = to_check[k] != reference[k]
        else:  # a missing key is a failure
            failed = True
        if failed:
            fail_keys.insert(0, k)
            break  # exit from the loop at the first failure
    return fail_keys  # the path of keys leading to the difference
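# Usage sketch (made-up dictionaries): the failure path is returned outermost
# key first:
#
#   cmpTreesDicts({"T": {"Entries": 10}}, {"T": {"Entries": 9}})
#   # -> ["T", "Entries"]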
def getCmpFailingValues(reference, to_check, fail_path):
    c = to_check
    r = reference
    for k in fail_path:
        c = c.get(k)
        r = r.get(k)
        if c is None or r is None:
            break  # one of the dictionaries is not deep enough
    return (fail_path, r, c)
h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested information and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # the first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        r = {}
        delta_i = 0
        cols = splitcols(ll[0])
        if len(cols) == 3:
            r["Name"], r["Title"] = cols[1:]
        else:
            r["Name"] = cols[1]
            # the title is on the next line
            r["Title"] = ll[1].strip("*\n").split("|")[1].strip()
            delta_i = 1

        cols = splitcols(ll[1 + delta_i])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2 + delta_i])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
        r["Compression"] = float(sizes[-1])

        return r
    def nextblock(lines, i):
        delta_i = 1
        dots = re.compile(r"^\.+$")
        stars = re.compile(r"^\*+$")
        while (
            i + delta_i < count
            and not dots.match(lines[i + delta_i][1:-1])
            and not stars.match(lines[i + delta_i])
        ):
            delta_i += 1
        return i + delta_i
    if i < (count - 3) and lines[i].startswith("*Tree"):
        i_nextblock = nextblock(lines, i)
        result = parseblock(lines[i:i_nextblock])
        result["Branches"] = {}
        i = i_nextblock + 1
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip the '*Branch' header lines
                i += 3
                continue
            i_nextblock = nextblock(lines, i)
            if i_nextblock >= count:
                break
            branch = parseblock(lines[i:i_nextblock])
            result["Branches"][branch["Name"]] = branch
            i = i_nextblock + 1

    return (result, i)
def _parseHistosSummary(lines, pos):
    """
    Extract the histogram info from the lines starting at pos.
    Returns the position of the first line after the summary block.
    """
    global h_count_re
    h_table_head = re.compile(
        r'(?:INFO|SUCCESS)\s+(1D|2D|3D|1D profile|2D profile|3d profile) histograms in directory\s+"(\w*)"'
    )
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]*)\"\s+(.*)")

    nlines = len(lines)

    # decode the header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total
    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups()  # type and directory
            t = t.replace(" profile", "Prof")
            cont = {}
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
        pos += 1
        if pos < nlines:
            l = lines[pos]
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # cannot be interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
    if not summ:
        # if the full table is not present, use only the header
        summ[name] = {"header": header}
    return summ, pos
def findHistosSummaries(stdout):
    """
    Scan stdout to find ROOT histogram summaries and digest them.
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}
    global h_count_re

    pos = 0
    while pos < nlines:
        summ = {}
        # find the first line of a new summary
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = _parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
def GetPlatform(self):
    """
    Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
    """
    arch = "None"
    # check the architecture name
    if "BINARY_TAG" in os.environ:
        arch = os.environ["BINARY_TAG"]
    elif "CMTCONFIG" in os.environ:
        arch = os.environ["CMTCONFIG"]
    elif "SCRAM_ARCH" in os.environ:
        arch = os.environ["SCRAM_ARCH"]
    elif os.environ.get("ENV_CMAKE_BUILD_TYPE", "") in (
        "Debug",
        "FastDebug",
        "Developer",
    ):
        arch = "dummy-dbg"
    elif os.environ.get("ENV_CMAKE_BUILD_TYPE", "") in (
        "Release",
        "MinSizeRel",
        "RelWithDebInfo",
        "",
    ):
        arch = "dummy-opt"
    return arch
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    platform = GetPlatform(self)
    return "winxp" in platform or platform.startswith("win")
1562 """Validate JSON output.
1563 returns -- A list of strings giving causes of failure."""
1567 with open(ref)
as f:
1568 expected = json.load(f)
1569 except json.JSONDecodeError
as err:
1570 causes.append(
"json parser error")
1571 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1576 causes.append(
"json content")
1577 result[
"json_diff"] =
"detailed diff was turned off"
1585 expected = sorted(expected, key=
lambda item: (item[
"component"], item[
"name"]))
1586 out = sorted(out, key=
lambda item: (item[
"component"], item[
"name"]))
1588 t.assertEqual(expected, out)
1589 except AssertionError
as err:
1590 causes.append(
"json content")
1591 result[
"json_diff"] = str(err).splitlines()[0]