22 from html
import escape
as escape_for_html
23 from subprocess
import PIPE, STDOUT, Popen
24 from unittest
import TestCase
26 if sys.version_info < (3, 5):
29 from codecs
import backslashreplace_errors, register_error
32 if isinstance(exc, UnicodeDecodeError):
33 code =
hex(ord(exc.object[exc.start]))
34 return (
"\\" + code[1:], exc.start + 1)
36 return backslashreplace_errors(exc)
38 register_error(
"backslashreplace", _new_backslashreplace_errors)
40 del backslashreplace_errors
41 del _new_backslashreplace_errors
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    # Characters not allowed in XML 1.0 text: C0 controls (except \t, \n, \r),
    # surrogate code points and the non-characters U+FFFE / U+FFFF.
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")

    def quote(match):
        # Replace every matched character with a visible marker.
        # NOTE: "%02X" (zero-padded) is used instead of the original "%2X",
        # which space-padded code points below 0x10 and produced markers
        # like "[NON-XML-CHAR-0x B]" instead of "[NON-XML-CHAR-0x0B]".
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
64 """helper to debug GAUDI-1084, dump the list of processes"""
65 from getpass
import getuser
67 if "WORKSPACE" in os.environ:
68 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
69 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
70 f.write(p.communicate()[0])
75 Send a signal to a process and all its child processes (starting from the
78 log = logging.getLogger(
"kill_tree")
79 ps_cmd = [
"ps",
"--no-headers",
"-o",
"pid",
"--ppid", str(ppid)]
82 get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
83 children =
map(int, get_children.communicate()[0].split())
84 for child
in children:
87 log.debug(
"killing process %d", ppid)
89 except OSError
as err:
92 log.debug(
"no such process %d", ppid)
129 logging.debug(
"running test %s", self.
name)
140 "TIMEOUT_DETAIL":
None,
146 r"from\s+Gaudi.Configuration\s+import\s+\*|"
147 r"from\s+Configurables\s+import",
150 suffix, lang =
".py",
"python"
152 suffix, lang =
".opts",
"c++"
153 self.
result[
"Options"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
154 lang, escape_for_html(self.
options)
156 optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
157 optionFile.file.write(self.
options.encode(
"utf-8"))
164 or platform.platform()
171 if re.search(prex, platform_id)
182 workdir = tempfile.mkdtemp()
193 prog_ext = os.path.splitext(prog)[1]
194 if prog_ext
not in [
".exe",
".py",
".bat"]:
198 prog =
which(prog)
or prog
200 args = list(
map(RationalizePath, self.
args))
202 if prog_ext ==
".py":
213 logging.debug(
"executing %r in %s", params, workdir)
215 params, stdout=PIPE, stderr=PIPE, env=self.
environment
217 logging.debug(
"(pid: %d)", self.
proc.pid)
218 out, err = self.
proc.communicate()
219 self.
out = out.decode(
"utf-8", errors=
"backslashreplace")
220 self.
err = err.decode(
"utf-8", errors=
"backslashreplace")
222 thread = threading.Thread(target=target)
227 if thread.is_alive():
228 logging.debug(
"time out in test %s (pid %d)", self.
name, self.
proc.pid)
235 "--eval-command=thread apply all backtrace",
237 gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
239 "utf-8", errors=
"backslashreplace"
244 if thread.is_alive():
246 self.
causes.append(
"timeout")
251 f
"completed test {self.name} with returncode = {self.returnedCode}"
253 logging.debug(
"validating test...")
254 val_start_time = time.perf_counter()
258 self.
validate_time = round(time.perf_counter() - val_start_time, 2)
260 logging.debug(f
"skipped test {self.name}")
265 shutil.rmtree(workdir,
True)
269 if self.
status !=
"skipped":
271 if self.
signal is not None:
273 self.
causes.append(
"exit code")
277 self.
causes.append(
"exit code")
280 self.
causes.append(
"exit code")
290 logging.debug(
"%s: %s", self.
name, self.
status)
292 "Exit Code":
"returnedCode",
295 "Runtime Environment":
"environment",
298 "Program Name":
"program",
300 "Validator":
"validator",
301 "Validation execution time":
"validate_time",
302 "Output Reference File":
"reference",
303 "Error Reference File":
"error_reference",
306 "Unsupported Platforms":
"unsupported_platforms",
307 "Stack Trace":
"stack_trace",
310 (key, getattr(self, attr))
311 for key, attr
in field_mapping.items()
312 if getattr(self, attr)
321 resultDict.extend(self.
result.annotations.items())
323 resultDict = dict(resultDict)
326 if "Validator" in resultDict:
327 resultDict[
"Validator"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
328 "python", escape_for_html(resultDict[
"Validator"])
339 elif stderr.strip() != self.
stderr.strip():
340 self.
causes.append(
"standard error")
341 return result, self.
causes
354 Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line), otherwise it is interpreted as the number of lines before the first one of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
357 if reference
is None:
366 reflines = list(filter(
None,
map(
lambda s: s.rstrip(), reference.splitlines())))
368 raise RuntimeError(
"Empty (or null) reference")
370 outlines = list(filter(
None,
map(
lambda s: s.rstrip(), stdout.splitlines())))
372 res_field =
"GaudiTest.RefBlock"
374 res_field +=
"_%s" % id
376 if signature
is None:
377 if signature_offset < 0:
378 signature_offset = len(reference) + signature_offset
379 signature = reflines[signature_offset]
382 pos = outlines.index(signature)
384 pos - signature_offset : pos + len(reflines) - signature_offset
386 if reflines != outlines:
387 msg =
"standard output"
390 if msg
not in causes:
392 result[res_field +
".observed"] = result.Quote(
"\n".join(outlines))
394 causes.append(
"missing signature")
395 result[res_field +
".signature"] = result.Quote(signature)
396 if len(reflines) > 1
or signature != reflines[0]:
397 result[res_field +
".expected"] = result.Quote(
"\n".join(reflines))
401 self, expected={"ERROR": 0,
"FATAL": 0}, stdout=
None, result=
None, causes=
None
404 Count the number of messages with required severity (by default ERROR and FATAL)
405 and check if their numbers match the expected ones (0 by default).
406 The dictionary "expected" can be used to tune the number of errors and fatals
407 allowed, or to limit the number of expected warnings etc.
422 outlines = stdout.splitlines()
423 from math
import log10
425 fmt =
"%%%dd - %%s" % (int(log10(len(outlines) + 1)))
431 if len(words) >= 2
and words[1]
in errors:
432 errors[words[1]].append(fmt % (linecount, l.rstrip()))
435 if len(errors[e]) != expected[e]:
436 causes.append(
"%s(%d)" % (e, len(errors[e])))
437 result[
"GaudiTest.lines.%s" % e] = result.Quote(
"\n".join(errors[e]))
438 result[
"GaudiTest.lines.%s.expected#" % e] = result.Quote(
450 ignore=r"Basket|.*size|Compression",
453 Compare the TTree summaries in stdout with the ones in trees_dict or in
454 the reference file. By default ignore the size, compression and basket
456 The presence of TTree summaries when none is expected is not a failure.
464 if trees_dict
is None:
467 if lreference
and os.path.isfile(lreference):
472 from pprint
import PrettyPrinter
476 result[
"GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
478 result[
"GaudiTest.TTrees.ignore"] = result.Quote(ignore)
483 causes.append(
"trees summaries")
485 result[
"GaudiTest.TTrees.failure_on"] = result.Quote(msg)
486 result[
"GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
491 self, stdout=None, result=None, causes=None, dict=None, ignore=None
494 Compare the TTree summaries in stdout with the ones in trees_dict or in
495 the reference file. By default ignore the size, compression and basket
497 The presence of TTree summaries when none is expected is not a failure.
509 if lreference
and os.path.isfile(lreference):
514 from pprint
import PrettyPrinter
518 result[
"GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
520 result[
"GaudiTest.Histos.ignore"] = result.Quote(ignore)
525 causes.append(
"histos summaries")
527 result[
"GaudiTest.Histos.failure_on"] = result.Quote(msg)
528 result[
"GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
533 self, stdout=None, stderr=None, result=None, causes=None, preproc=None
536 Default validation action: compare standard output and error to the
551 preproc = normalizeTestSuite
555 if lreference
and os.path.isfile(lreference):
557 lreference,
"standard output",
"Output Diff", preproc=preproc
560 causes += [
"missing reference file"]
564 if causes
and lreference:
567 newrefname =
".".join([lreference,
"new"])
568 while os.path.exists(newrefname):
570 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
571 newref = open(newrefname,
"w")
573 for l
in stdout.splitlines():
574 newref.write(l.rstrip() +
"\n")
576 result[
"New Output Reference File"] = os.path.relpath(
588 if os.path.isfile(lreference):
590 lreference,
"standard error",
"Error Diff", preproc=preproc
593 newcauses = [
"missing error reference file"]
595 if newcauses
and lreference:
597 newrefname =
".".join([lreference,
"new"])
598 while os.path.exists(newrefname):
600 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
601 newref = open(newrefname,
"w")
603 for l
in stderr.splitlines():
604 newref.write(l.rstrip() +
"\n")
606 result[
"New Error Reference File"] = os.path.relpath(
611 lreference,
"standard error",
"ExecTest.expected_stderr"
624 JSON validation action: compare json file to reference file
632 if not os.path.isfile(output_file):
633 causes.append(f
"output file {output_file} does not exist")
637 with open(output_file)
as f:
638 output = json.load(f)
639 except json.JSONDecodeError
as err:
640 causes.append(
"json parser error")
641 result[
"output_parse_error"] = f
"json parser error in {output_file}: {err}"
646 causes.append(
"reference file not set")
647 elif not os.path.isfile(lreference):
648 causes.append(
"reference file does not exist")
651 if causes
and lreference:
654 newrefname =
".".join([lreference,
"new"])
655 while os.path.exists(newrefname):
657 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
658 with open(newrefname,
"w")
as newref:
659 json.dump(output, newref, indent=4)
660 result[
"New JSON Output Reference File"] = os.path.relpath(
def platformSplit(p):
    """Break a platform id string into its component tags (split on '-' and '+')."""
    fragments = re.split(r"[-+]", p)
    return set(fragments)
680 reference = os.path.normpath(
681 os.path.join(self.
basedir, os.path.expandvars(reffile))
685 spec_ref = reference[:-3] +
GetPlatform(self)[0:3] + reference[-3:]
686 if os.path.isfile(spec_ref):
690 dirname, basename = os.path.split(reference)
693 head = basename +
"."
696 if "do0" in platform:
699 for f
in os.listdir(dirname):
700 if f.startswith(head):
701 req_plat = platformSplit(f[head_len:])
702 if platform.issuperset(req_plat):
703 candidates.append((len(req_plat), f))
708 reference = os.path.join(dirname, candidates[-1][1])
718 from GaudiKernel
import ROOT6WorkAroundEnabled
731 Function used to normalize the used path
733 newPath = os.path.normpath(os.path.expandvars(p))
734 if os.path.exists(newPath):
735 p = os.path.realpath(newPath)
741 Locates an executable in the executables path ($PATH) and returns the full
742 path to it. An application is looked for with or without the '.exe' suffix.
743 If the executable cannot be found, None is returned
745 if os.path.isabs(executable):
746 if not os.path.isfile(executable):
747 if executable.endswith(
".exe"):
748 if os.path.isfile(executable[:-4]):
749 return executable[:-4]
751 executable = os.path.split(executable)[1]
754 for d
in os.environ.get(
"PATH").split(os.pathsep):
755 fullpath = os.path.join(d, executable)
756 if os.path.isfile(fullpath):
758 elif executable.endswith(
".exe")
and os.path.isfile(fullpath[:-4]):
772 UNTESTED =
"UNTESTED"
782 def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
786 assert isinstance(key, str)
790 assert isinstance(key, str)
791 assert isinstance(value, str),
"{!r} is not a string".
format(value)
def Quote(self, text):
    """
    Convert text to html by escaping special chars and adding <pre> tags.
    """
    escaped = escape_for_html(text)
    return "<pre>{}</pre>".format(escaped)
817 """Validate the output of the program.
818 'stdout' -- A string containing the data written to the standard output
820 'stderr' -- A string containing the data written to the standard error
822 'result' -- A 'Result' object. It may be used to annotate
823 the outcome according to the content of stderr.
824 returns -- A list of strings giving causes of failure."""
829 causes.append(self.
cause)
835 """Compare 's1' and 's2', ignoring line endings.
838 returns -- True if 's1' and 's2' are the same, ignoring
839 differences in line endings."""
843 to_ignore = re.compile(
844 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
848 return not to_ignore.match(l)
850 return list(filter(keep_line, s1.splitlines())) == list(
851 filter(keep_line, s2.splitlines())
854 return s1.splitlines() == s2.splitlines()
859 """Base class for a callable that takes a file and returns a modified
874 if not isinstance(input, str):
878 lines = input.splitlines()
882 output =
"\n".join(output)
912 if line.find(s) >= 0:
927 if self.
start in line:
930 elif self.
end in line:
940 when = re.compile(when)
944 if isinstance(rhs, RegexpReplacer):
946 res._operations = self.
_operations + rhs._operations
948 res = FilePreprocessor.__add__(self, rhs)
953 if w
is None or w.search(line):
954 line = o.sub(r, line)
961 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
962 "00:00:00 1970-01-01",
965 normalizeEOL.__processLine__ =
lambda line: str(line).rstrip() +
"\n"
969 skipEmptyLines.__processLine__ =
lambda line: (line.strip()
and line)
or None
983 line = line[: (pos + self.
siglen)]
984 lst = line[(pos + self.
siglen) :].split()
986 line +=
" ".join(lst)
992 Sort group of lines matching a regular expression
996 self.
exp = exp
if hasattr(exp,
"match")
else re.compile(exp)
999 match = self.
exp.match
1008 output.extend(group)
1015 normalizeTestSuite = maskPointers + normalizeDate
1017 (
"TIMER",
r"\s+[+-]?[0-9]+[0-9.e+-]*",
" 0"),
1018 (
"release all pending",
r"^.*/([^/]*:.*)",
r"\1"),
1019 (
"^#.*file",
r"file '.*[/\\]([^/\\]*)$",
r"file '\1"),
1021 "^JobOptionsSvc.*options successfully read in from",
1022 r"read in from .*[/\\]([^/\\]*)$",
1028 r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1029 "00000000-0000-0000-0000-000000000000",
1033 "ServiceLocatorHelper::",
1034 "ServiceLocatorHelper::(create|locate)Service",
1035 "ServiceLocatorHelper::service",
1038 (
None,
r"e([-+])0([0-9][0-9])",
r"e\1\2"),
1040 (
None,
r"Service reference count check:",
r"Looping over all active services..."),
1044 r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1047 (
"ApplicationMgr",
r"(declareMultiSvcType|addMultiSvc): ",
""),
1048 (
r"Property 'Name': Value",
r"( = '[^']+':)'(.*)'",
r"\1\2"),
1049 (
"TimelineSvc",
"to file 'TimelineFile':",
"to file "),
1050 (
"DataObjectHandleBase",
r'DataObjectHandleBase\("([^"]*)"\)',
r"'\1'"),
1057 "JobOptionsSvc INFO # ",
1058 "JobOptionsSvc WARNING # ",
1061 "This machine has a speed",
1063 "ToolSvc.Sequenc... INFO",
1064 "DataListenerSvc INFO XML written to file:",
1067 "DEBUG No writable file catalog found which contains FID:",
1068 "DEBUG Service base class initialized successfully",
1070 "DEBUG Incident timing:",
1074 "INFO 'CnvServices':[",
1076 "DEBUG 'CnvServices':[",
1081 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1083 "mismatching case for property name:",
1085 "Histograms saving not required.",
1087 "Properties are dumped into",
1089 "WARNING no ROOT output file name",
1090 "INFO Writing ROOT histograms to:",
1091 "INFO Completed update of ROOT histograms in:",
1094 "data dependencies:",
1097 r"^JobOptionsSvc INFO *$",
1100 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1101 r"File '.*.xml' does not exist",
1102 r"INFO Refer to dataset .* by its file ID:",
1103 r"INFO Referring to dataset .* by its file ID:",
1104 r"INFO Disconnect from dataset",
1105 r"INFO Disconnected from dataset",
1106 r"INFO Disconnected data IO:",
1107 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1109 r".*StatusCodeSvc.*",
1110 r".*StatusCodeCheck.*",
1111 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1114 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1116 r"^ +[0-9]+ \|.*ROOT",
1117 r"^ +[0-9]+ \|.*\|.*Dict",
1119 r"EventLoopMgr.*---> Loop Finished",
1120 r"HiveSlimEventLo.*---> Loop Finished",
1125 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1129 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1130 r"Property(.*)'Audit(Begin|End)Run':",
1132 r"Property(.*)'AuditRe(start|initialize)':",
1133 r"Property(.*)'Blocking':",
1135 r"Property(.*)'ErrorCount(er)?':",
1137 r"Property(.*)'Sequential':",
1139 r"Property(.*)'FilterCircularDependencies':",
1141 r"Property(.*)'IsClonable':",
1143 r"Property update for OutputLevel : new value =",
1144 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1153 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1157 normalizeTestSuite = (
1159 + normalizeTestSuite
1166 normalizeExamples = normalizeTestSuite
1172 def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite):
1180 if os.path.isfile(self.
reffile):
1181 orig = open(self.
reffile).readlines()
1184 result[self.
result_key +
".preproc.orig"] = result.Quote(
1185 "\n".join(
map(str.strip, orig))
1189 new = stdout.splitlines()
1195 difflib.unified_diff(
1196 [l.rstrip()
for l
in orig],
1197 [l.rstrip()
for l
in new],
1199 fromfile=
"Reference file",
1200 tofile=
"Actual output",
1205 result[self.
result_key] = result.Quote(
"\n".join(filterdiffs))
1206 result[self.
result_key +
".preproc.new"] = result.Quote(
1207 "\n".join(
map(str.strip, new))
1209 causes.append(self.
cause)
1215 Scan stdout to find ROOT TTree summaries and digest them.
1217 stars = re.compile(
r"^\*+$")
1218 outlines = stdout.splitlines()
1219 nlines = len(outlines)
1225 while i < nlines
and not stars.match(outlines[i]):
1230 trees[tree[
"Name"]] = tree
1237 Check that all the keys in reference are in to_check too, with the same value.
1238 If the value is a dict, the function is called recursively. to_check can
1239 contain more keys than reference, that will not be tested.
1240 The function returns at the first difference found.
1245 ignore_re = re.compile(ignore)
1246 keys = [key
for key
in reference
if not ignore_re.match(key)]
1248 keys = reference.keys()
1252 if (
type(reference[k])
is dict)
and (
type(to_check[k])
is dict):
1255 failed = fail_keys =
cmpTreesDicts(reference[k], to_check[k], ignore)
1258 failed = to_check[k] != reference[k]
1263 fail_keys.insert(0, k)
1274 if c
is None or r
is None:
1276 return (fail_path, r, c)
1280 h_count_re = re.compile(
r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1285 Parse the TTree summary table in lines, starting from pos.
1286 Returns a tuple with the dictionary of the digested information and the
1287 position of the first line after the summary.
def splitcols(l):
    """Split one row of a ROOT TTree summary table into its (up to 3) column values."""
    # Drop the decorative '*' border, then cut on at most two ':' separators
    # so a ':' inside the last column is preserved.
    cells = l.strip("*\n").split(":", 2)
    return [cell.strip() for cell in cells]
1299 cols = splitcols(ll[0])
1303 r[
"Name"], r[
"Title"] = cols[1:]
1308 r[
"Title"] = ll[1].strip(
"*\n").split(
"|")[1].strip()
1312 cols = splitcols(ll[1 + delta_i])
1313 r[
"Entries"] = int(cols[1])
1315 sizes = cols[2].split()
1316 r[
"Total size"] = int(sizes[2])
1317 if sizes[-1] ==
"memory":
1320 r[
"File size"] = int(sizes[-1])
1322 cols = splitcols(ll[2 + delta_i])
1323 sizes = cols[2].split()
1324 if cols[0] ==
"Baskets":
1325 r[
"Baskets"] = int(cols[1])
1326 r[
"Basket size"] = int(sizes[2])
1327 r[
"Compression"] = float(sizes[-1])
1331 def nextblock(lines, i):
1333 dots = re.compile(
r"^\.+$")
1334 stars = re.compile(
r"^\*+$")
1338 and not dots.match(lines[i + delta_i][1:-1])
1339 and not stars.match(lines[i + delta_i])
1344 if i < (count - 3)
and lines[i].startswith(
"*Tree"):
1345 i_nextblock = nextblock(lines, i)
1346 result = parseblock(lines[i:i_nextblock])
1347 result[
"Branches"] = {}
1349 while i < (count - 3)
and lines[i].startswith(
"*Br"):
1350 if i < (count - 2)
and lines[i].startswith(
"*Branch "):
1354 i_nextblock = nextblock(lines, i)
1355 if i_nextblock >= count:
1357 branch = parseblock(lines[i:i_nextblock])
1358 result[
"Branches"][branch[
"Name"]] = branch
1366 Extract the histograms infos from the lines starting at pos.
1367 Returns the position of the first line after the summary block.
1370 h_table_head = re.compile(
1371 r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1373 h_short_summ = re.compile(
r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1378 m = h_count_re.search(lines[pos])
1379 name = m.group(1).strip()
1380 total = int(m.group(2))
1382 for k, v
in [x.split(
"=")
for x
in m.group(3).split()]:
1385 header[
"Total"] = total
1389 m = h_table_head.search(lines[pos])
1392 t = t.replace(
" profile",
"Prof")
1399 if l.startswith(
" | ID"):
1401 titles = [x.strip()
for x
in l.split(
"|")][1:]
1403 while pos < nlines
and lines[pos].startswith(
" |"):
1405 values = [x.strip()
for x
in l.split(
"|")][1:]
1407 for i
in range(len(titles)):
1408 hcont[titles[i]] = values[i]
1409 cont[hcont[
"ID"]] = hcont
1411 elif l.startswith(
" ID="):
1412 while pos < nlines
and lines[pos].startswith(
" ID="):
1414 x.strip()
for x
in h_short_summ.search(lines[pos]).groups()
1416 cont[values[0]] = values
1419 raise RuntimeError(
"Cannot understand line %d: '%s'" % (pos, l))
1423 summ[d][
"header"] = header
1428 summ[name] = {
"header": header}
1434 Scan stdout to find ROOT TTree summaries and digest them.
1436 outlines = stdout.splitlines()
1437 nlines = len(outlines) - 1
1445 match = h_count_re.search(outlines[pos])
1446 while pos < nlines
and not match:
1448 match = h_count_re.search(outlines[pos])
1451 summaries.update(summ)
1457 Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1461 if "BINARY_TAG" in os.environ:
1462 arch = os.environ[
"BINARY_TAG"]
1463 elif "CMTCONFIG" in os.environ:
1464 arch = os.environ[
"CMTCONFIG"]
1465 elif "SCRAM_ARCH" in os.environ:
1466 arch = os.environ[
"SCRAM_ARCH"]
1467 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1473 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1485 Return True if the current platform is Windows.
1487 This function was needed because of the change in the CMTCONFIG format,
1488 from win32_vc71_dbg to i686-winxp-vc9-dbg.
1491 return "winxp" in platform
or platform.startswith(
"win")
1496 """Validate JSON output.
1497 returns -- A list of strings giving causes of failure."""
1501 with open(ref)
as f:
1502 expected = json.load(f)
1503 except json.JSONDecodeError
as err:
1504 causes.append(
"json parser error")
1505 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1510 causes.append(
"json content")
1511 result[
"json_diff"] =
"detailed diff was turned off"
1519 expected = sorted(expected, key=
lambda item: (item[
"component"], item[
"name"]))
1520 out = sorted(out, key=
lambda item: (item[
"component"], item[
"name"]))
1522 t.assertEqual(expected, out)
1523 except AssertionError
as err:
1524 causes.append(
"json content")
1525 result[
"json_diff"] = str(err).splitlines()[0]