22 from subprocess
import PIPE, STDOUT, Popen
23 from unittest
import TestCase
26 from html
import escape
as escape_for_html
28 from cgi
import escape
as escape_for_html
32 if sys.version_info < (3, 5):
35 from codecs
import backslashreplace_errors, register_error
38 if isinstance(exc, UnicodeDecodeError):
39 code =
hex(ord(exc.object[exc.start]))
40 return (
"\\" + code[1:], exc.start + 1)
42 return backslashreplace_errors(exc)
44 register_error(
"backslashreplace", _new_backslashreplace_errors)
46 del backslashreplace_errors
47 del _new_backslashreplace_errors
54 Take a string with invalid ASCII/UTF characters and quote them so that the
55 string can be used in an XML text.
57 >>> sanitize_for_xml('this is \x1b')
58 'this is [NON-XML-CHAR-0x1B]'
60 bad_chars = re.compile(
"[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")
64 return "".join(
"[NON-XML-CHAR-0x%2X]" % ord(c)
for c
in match.group())
66 return bad_chars.sub(quote, data)
70 """helper to debug GAUDI-1084, dump the list of processes"""
71 from getpass
import getuser
73 if "WORKSPACE" in os.environ:
74 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
75 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
76 f.write(p.communicate()[0])
81 Send a signal to a process and all its child processes (starting from the
84 log = logging.getLogger(
"kill_tree")
85 ps_cmd = [
"ps",
"--no-headers",
"-o",
"pid",
"--ppid", str(ppid)]
88 get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
89 children =
map(int, get_children.communicate()[0].split())
90 for child
in children:
93 log.debug(
"killing process %d", ppid)
95 except OSError
as err:
98 log.debug(
"no such process %d", ppid)
105 _common_tmpdir =
None
135 logging.debug(
"running test %s", self.
name)
146 "TIMEOUT_DETAIL":
None,
152 r"from\s+Gaudi.Configuration\s+import\s+\*|"
153 r"from\s+Configurables\s+import",
156 suffix, lang =
".py",
"python"
158 suffix, lang =
".opts",
"c++"
159 self.
result[
"Options"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
160 lang, escape_for_html(self.
options)
162 optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
163 optionFile.file.write(self.
options.encode(
"utf-8"))
170 or platform.platform()
177 if re.search(prex, platform_id)
188 workdir = tempfile.mkdtemp()
199 prog_ext = os.path.splitext(prog)[1]
200 if prog_ext
not in [
".exe",
".py",
".bat"]:
204 prog =
which(prog)
or prog
206 args = list(
map(RationalizePath, self.
args))
208 if prog_ext ==
".py":
219 logging.debug(
"executing %r in %s", params, workdir)
221 params, stdout=PIPE, stderr=PIPE, env=self.
environment
223 logging.debug(
"(pid: %d)", self.
proc.pid)
224 out, err = self.
proc.communicate()
225 self.
out = out.decode(
"utf-8", errors=
"backslashreplace")
226 self.
err = err.decode(
"utf-8", errors=
"backslashreplace")
228 thread = threading.Thread(target=target)
233 if thread.is_alive():
234 logging.debug(
"time out in test %s (pid %d)", self.
name, self.
proc.pid)
241 "--eval-command=thread apply all backtrace",
243 gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
245 "utf-8", errors=
"backslashreplace"
250 if thread.is_alive():
252 self.
causes.append(
"timeout")
257 f
"completed test {self.name} with returncode = {self.returnedCode}"
259 logging.debug(
"validating test...")
260 val_start_time = time.perf_counter()
264 self.
validate_time = round(time.perf_counter() - val_start_time, 2)
266 logging.debug(f
"skipped test {self.name}")
271 shutil.rmtree(workdir,
True)
275 if self.
status !=
"skipped":
277 if self.
signal is not None:
279 self.
causes.append(
"exit code")
283 self.
causes.append(
"exit code")
286 self.
causes.append(
"exit code")
296 logging.debug(
"%s: %s", self.
name, self.
status)
298 "Exit Code":
"returnedCode",
301 "Runtime Environment":
"environment",
304 "Program Name":
"program",
306 "Validator":
"validator",
307 "Validation execution time":
"validate_time",
308 "Output Reference File":
"reference",
309 "Error Reference File":
"error_reference",
312 "Unsupported Platforms":
"unsupported_platforms",
313 "Stack Trace":
"stack_trace",
316 (key, getattr(self, attr))
317 for key, attr
in field_mapping.items()
318 if getattr(self, attr)
327 resultDict.extend(self.
result.annotations.items())
329 resultDict = dict(resultDict)
332 if "Validator" in resultDict:
333 resultDict[
"Validator"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
334 "python", escape_for_html(resultDict[
"Validator"])
345 elif stderr.strip() != self.
stderr.strip():
346 self.
causes.append(
"standard error")
347 return result, self.
causes
360 Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
363 if reference
is None:
372 reflines = list(filter(
None,
map(
lambda s: s.rstrip(), reference.splitlines())))
374 raise RuntimeError(
"Empty (or null) reference")
376 outlines = list(filter(
None,
map(
lambda s: s.rstrip(), stdout.splitlines())))
378 res_field =
"GaudiTest.RefBlock"
380 res_field +=
"_%s" % id
382 if signature
is None:
383 if signature_offset < 0:
# NOTE(review): len(reference) is the *character* count of the reference
# string, but signature_offset is used on the next line as an index into
# the line list `reflines` — this looks like it should be len(reflines)
# (the docstring says a negative offset is "an index in a list", e.g. -1
# meaning the last line). TODO confirm against callers before changing.
384 signature_offset = len(reference) + signature_offset
385 signature = reflines[signature_offset]
388 pos = outlines.index(signature)
390 pos - signature_offset : pos + len(reflines) - signature_offset
392 if reflines != outlines:
393 msg =
"standard output"
396 if msg
not in causes:
398 result[res_field +
".observed"] = result.Quote(
"\n".join(outlines))
400 causes.append(
"missing signature")
401 result[res_field +
".signature"] = result.Quote(signature)
402 if len(reflines) > 1
or signature != reflines[0]:
403 result[res_field +
".expected"] = result.Quote(
"\n".join(reflines))
407 self, expected={"ERROR": 0,
"FATAL": 0}, stdout=
None, result=
None, causes=
None
410 Count the number of messages with required severity (by default ERROR and FATAL)
411 and check if their numbers match the expected ones (0 by default).
412 The dictionary "expected" can be used to tune the number of errors and fatals
413 allowed, or to limit the number of expected warnings etc.
428 outlines = stdout.splitlines()
429 from math
import log10
431 fmt =
"%%%dd - %%s" % (int(log10(len(outlines) + 1)))
437 if len(words) >= 2
and words[1]
in errors:
438 errors[words[1]].append(fmt % (linecount, l.rstrip()))
441 if len(errors[e]) != expected[e]:
442 causes.append(
"%s(%d)" % (e, len(errors[e])))
443 result[
"GaudiTest.lines.%s" % e] = result.Quote(
"\n".join(errors[e]))
444 result[
"GaudiTest.lines.%s.expected#" % e] = result.Quote(
456 ignore=r"Basket|.*size|Compression",
459 Compare the TTree summaries in stdout with the ones in trees_dict or in
460 the reference file. By default ignore the size, compression and basket
462 The presence of TTree summaries when none is expected is not a failure.
470 if trees_dict
is None:
473 if lreference
and os.path.isfile(lreference):
478 from pprint
import PrettyPrinter
482 result[
"GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
484 result[
"GaudiTest.TTrees.ignore"] = result.Quote(ignore)
489 causes.append(
"trees summaries")
491 result[
"GaudiTest.TTrees.failure_on"] = result.Quote(msg)
492 result[
"GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
497 self, stdout=None, result=None, causes=None, dict=None, ignore=None
500 Compare the TTree summaries in stdout with the ones in trees_dict or in
501 the reference file. By default ignore the size, compression and basket
503 The presence of TTree summaries when none is expected is not a failure.
515 if lreference
and os.path.isfile(lreference):
520 from pprint
import PrettyPrinter
524 result[
"GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
526 result[
"GaudiTest.Histos.ignore"] = result.Quote(ignore)
531 causes.append(
"histos summaries")
533 result[
"GaudiTest.Histos.failure_on"] = result.Quote(msg)
534 result[
"GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
539 self, stdout=None, stderr=None, result=None, causes=None, preproc=None
542 Default validation acti*on: compare standard output and error to the
557 preproc = normalizeExamples
561 if lreference
and os.path.isfile(lreference):
563 lreference,
"standard output",
"Output Diff", preproc=preproc
566 causes += [
"missing reference file"]
570 if causes
and lreference:
573 newrefname =
".".join([lreference,
"new"])
574 while os.path.exists(newrefname):
576 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
# NOTE(review): file handle opened without a context manager; whether it is
# closed afterwards is on lines not visible here — prefer
# `with open(newrefname, "w") as newref:` to guarantee the handle is closed
# (and the data flushed) even if a write raises. TODO confirm.
577 newref = open(newrefname,
"w")
579 for l
in stdout.splitlines():
580 newref.write(l.rstrip() +
"\n")
582 result[
"New Output Reference File"] = os.path.relpath(
594 if os.path.isfile(lreference):
596 lreference,
"standard error",
"Error Diff", preproc=preproc
599 newcauses = [
"missing error reference file"]
601 if newcauses
and lreference:
603 newrefname =
".".join([lreference,
"new"])
604 while os.path.exists(newrefname):
606 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
607 newref = open(newrefname,
"w")
609 for l
in stderr.splitlines():
610 newref.write(l.rstrip() +
"\n")
612 result[
"New Error Reference File"] = os.path.relpath(
617 lreference,
"standard error",
"ExecTest.expected_stderr"
630 JSON validation action: compare json file to reference file
638 if not os.path.isfile(output_file):
639 causes.append(f
"output file {output_file} does not exist")
643 with open(output_file)
as f:
644 output = json.load(f)
645 except json.JSONDecodeError
as err:
646 causes.append(
"json parser error")
647 result[
"output_parse_error"] = f
"json parser error in {output_file}: {err}"
652 causes.append(
"reference file not set")
653 elif not os.path.isfile(lreference):
654 causes.append(
"reference file does not exist")
657 if causes
and lreference:
660 newrefname =
".".join([lreference,
"new"])
661 while os.path.exists(newrefname):
663 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
664 with open(newrefname,
"w")
as newref:
665 json.dump(output, newref, indent=4)
666 result[
"New JSON Output Reference File"] = os.path.relpath(
def platformSplit(p):
    """Break a platform id (e.g. 'x86_64-centos7-gcc9-opt') into the set of
    its components; both '-' and '+' act as separators."""
    # Normalize '+' to '-' first so a single split pass handles both
    # separators (equivalent to re.split(r"[-+]", p)).
    return set(p.replace("+", "-").split("-"))
686 reference = os.path.normpath(
687 os.path.join(self.
basedir, os.path.expandvars(reffile))
691 spec_ref = reference[:-3] +
GetPlatform(self)[0:3] + reference[-3:]
692 if os.path.isfile(spec_ref):
696 dirname, basename = os.path.split(reference)
699 head = basename +
"."
702 if "do0" in platform:
705 for f
in os.listdir(dirname):
706 if f.startswith(head):
707 req_plat = platformSplit(f[head_len:])
708 if platform.issuperset(req_plat):
709 candidates.append((len(req_plat), f))
714 reference = os.path.join(dirname, candidates[-1][1])
724 from GaudiKernel
import ROOT6WorkAroundEnabled
737 Function used to normalize the used path
739 newPath = os.path.normpath(os.path.expandvars(p))
740 if os.path.exists(newPath):
741 p = os.path.realpath(newPath)
747 Locates an executable in the executables path ($PATH) and returns the full
748 path to it. An application is looked for with or without the '.exe' suffix.
749 If the executable cannot be found, None is returned
751 if os.path.isabs(executable):
752 if not os.path.isfile(executable):
753 if executable.endswith(
".exe"):
754 if os.path.isfile(executable[:-4]):
755 return executable[:-4]
757 executable = os.path.split(executable)[1]
# NOTE(review): os.environ.get("PATH") returns None when PATH is unset in
# the environment, which would raise AttributeError on .split(); consider
# os.environ.get("PATH", "") so the loop simply finds nothing — TODO confirm
# the intended behaviour for a missing PATH.
760 for d
in os.environ.get(
"PATH").split(os.pathsep):
761 fullpath = os.path.join(d, executable)
762 if os.path.isfile(fullpath):
764 elif executable.endswith(
".exe")
and os.path.isfile(fullpath[:-4]):
778 UNTESTED =
"UNTESTED"
788 def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
792 assert isinstance(key, six.string_types)
796 assert isinstance(key, six.string_types)
797 assert isinstance(value, six.string_types),
"{!r} is not a string".
format(value)
802 Convert text to html by escaping special chars and adding <pre> tags.
804 return "<pre>{}</pre>".
format(escape_for_html(text))
823 """Validate the output of the program.
824 'stdout' -- A string containing the data written to the standard output
826 'stderr' -- A string containing the data written to the standard error
828 'result' -- A 'Result' object. It may be used to annotate
829 the outcome according to the content of stderr.
830 returns -- A list of strings giving causes of failure."""
835 causes.append(self.
cause)
841 """Compare 's1' and 's2', ignoring line endings.
844 returns -- True if 's1' and 's2' are the same, ignoring
845 differences in line endings."""
849 to_ignore = re.compile(
850 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
854 return not to_ignore.match(l)
856 return list(filter(keep_line, s1.splitlines())) == list(
857 filter(keep_line, s2.splitlines())
860 return s1.splitlines() == s2.splitlines()
865 """Base class for a callable that takes a file and returns a modified
880 if not isinstance(input, six.string_types):
884 lines = input.splitlines()
888 output =
"\n".join(output)
918 if line.find(s) >= 0:
933 if self.
start in line:
936 elif self.
end in line:
946 when = re.compile(when)
950 if isinstance(rhs, RegexpReplacer):
952 res._operations = self.
_operations + rhs._operations
954 res = FilePreprocessor.__add__(self, rhs)
959 if w
is None or w.search(line):
960 line = o.sub(r, line)
967 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
968 "00:00:00 1970-01-01",
971 normalizeEOL.__processLine__ =
lambda line: str(line).rstrip() +
"\n"
975 skipEmptyLines.__processLine__ =
lambda line: (line.strip()
and line)
or None
989 line = line[: (pos + self.
siglen)]
990 lst = line[(pos + self.
siglen) :].split()
992 line +=
" ".join(lst)
998 Sort group of lines matching a regular expression
1002 self.
exp = exp
if hasattr(exp,
"match")
else re.compile(exp)
1005 match = self.
exp.match
1014 output.extend(group)
1021 normalizeExamples = maskPointers + normalizeDate
1023 (
"TIMER",
r"\s+[+-]?[0-9]+[0-9.e+-]*",
" 0"),
1024 (
"release all pending",
r"^.*/([^/]*:.*)",
r"\1"),
1025 (
"^#.*file",
r"file '.*[/\\]([^/\\]*)$",
r"file '\1"),
1027 "^JobOptionsSvc.*options successfully read in from",
1028 r"read in from .*[/\\]([^/\\]*)$",
1034 r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1035 "00000000-0000-0000-0000-000000000000",
1039 "ServiceLocatorHelper::",
1040 "ServiceLocatorHelper::(create|locate)Service",
1041 "ServiceLocatorHelper::service",
1044 (
None,
r"e([-+])0([0-9][0-9])",
r"e\1\2"),
1046 (
None,
r"Service reference count check:",
r"Looping over all active services..."),
1050 r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1053 (
"ApplicationMgr",
r"(declareMultiSvcType|addMultiSvc): ",
""),
1054 (
r"Property \['Name': Value\]",
r"( = '[^']+':)'(.*)'",
r"\1\2"),
1055 (
"TimelineSvc",
"to file 'TimelineFile':",
"to file "),
1056 (
"DataObjectHandleBase",
r'DataObjectHandleBase\("([^"]*)"\)',
r"'\1'"),
1063 "JobOptionsSvc INFO # ",
1064 "JobOptionsSvc WARNING # ",
1067 "This machine has a speed",
1069 "ToolSvc.Sequenc... INFO",
1070 "DataListenerSvc INFO XML written to file:",
1073 "DEBUG No writable file catalog found which contains FID:",
1074 "DEBUG Service base class initialized successfully",
1076 "DEBUG Incident timing:",
1080 "INFO 'CnvServices':[",
1082 "DEBUG 'CnvServices':[",
1087 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1089 "mismatching case for property name:",
1091 "Histograms saving not required.",
1093 "Properties are dumped into",
1095 "WARNING no ROOT output file name",
1096 "INFO Writing ROOT histograms to:",
1097 "INFO Completed update of ROOT histograms in:",
1100 "data dependencies:",
1103 r"^JobOptionsSvc INFO *$",
1106 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1107 r"File '.*.xml' does not exist",
1108 r"INFO Refer to dataset .* by its file ID:",
1109 r"INFO Referring to dataset .* by its file ID:",
1110 r"INFO Disconnect from dataset",
1111 r"INFO Disconnected from dataset",
1112 r"INFO Disconnected data IO:",
1113 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1115 r".*StatusCodeSvc.*",
1116 r".*StatusCodeCheck.*",
1117 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1120 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1122 r"^ +[0-9]+ \|.*ROOT",
1123 r"^ +[0-9]+ \|.*\|.*Dict",
1125 r"EventLoopMgr.*---> Loop Finished",
1126 r"HiveSlimEventLo.*---> Loop Finished",
1131 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1135 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1136 r"Property(.*)'Audit(Begin|End)Run':",
1138 r"Property(.*)'AuditRe(start|initialize)':",
1139 r"Property(.*)'Blocking':",
1141 r"Property(.*)'ErrorCount(er)?':",
1143 r"Property(.*)'Sequential':",
1145 r"Property(.*)'FilterCircularDependencies':",
1147 r"Property(.*)'IsClonable':",
1149 r"Property update for OutputLevel : new value =",
1150 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1159 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1163 normalizeExamples = (
1176 def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1184 if os.path.isfile(self.
reffile):
1185 orig = open(self.
reffile).readlines()
1188 result[self.
result_key +
".preproc.orig"] = result.Quote(
1189 "\n".join(
map(str.strip, orig))
1193 new = stdout.splitlines()
1198 difflib.unified_diff(
1199 orig, new, n=1, fromfile=
"Reference file", tofile=
"Actual output"
1203 result[self.
result_key] = result.Quote(
"".join(filterdiffs))
1204 result[self.
result_key +
".preproc.new"] = result.Quote(
1205 "\n".join(
map(str.strip, new))
1207 causes.append(self.
cause)
1213 Scan stdout to find ROOT TTree summaries and digest them.
1215 stars = re.compile(
r"^\*+$")
1216 outlines = stdout.splitlines()
1217 nlines = len(outlines)
1223 while i < nlines
and not stars.match(outlines[i]):
1228 trees[tree[
"Name"]] = tree
1235 Check that all the keys in reference are in to_check too, with the same value.
1236 If the value is a dict, the function is called recursively. to_check can
1237 contain more keys than reference, that will not be tested.
1238 The function returns at the first difference found.
1243 ignore_re = re.compile(ignore)
1244 keys = [key
for key
in reference
if not ignore_re.match(key)]
1246 keys = reference.keys()
1250 if (
type(reference[k])
is dict)
and (
type(to_check[k])
is dict):
1253 failed = fail_keys =
cmpTreesDicts(reference[k], to_check[k], ignore)
1256 failed = to_check[k] != reference[k]
1261 fail_keys.insert(0, k)
1272 if c
is None or r
is None:
1274 return (fail_path, r, c)
1278 h_count_re = re.compile(
r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1283 Parse the TTree summary table in lines, starting from pos.
1284 Returns a tuple with the dictionary with the digested informations and the
1285 position of the first line after the summary.
1292 return [f.strip()
for f
in l.strip(
"*\n").split(
":", 2)]
1297 cols = splitcols(ll[0])
1301 r[
"Name"], r[
"Title"] = cols[1:]
1306 r[
"Title"] = ll[1].strip(
"*\n").split(
"|")[1].strip()
1310 cols = splitcols(ll[1 + delta_i])
1311 r[
"Entries"] = int(cols[1])
1313 sizes = cols[2].split()
1314 r[
"Total size"] = int(sizes[2])
1315 if sizes[-1] ==
"memory":
1318 r[
"File size"] = int(sizes[-1])
1320 cols = splitcols(ll[2 + delta_i])
1321 sizes = cols[2].split()
1322 if cols[0] ==
"Baskets":
1323 r[
"Baskets"] = int(cols[1])
1324 r[
"Basket size"] = int(sizes[2])
1325 r[
"Compression"] = float(sizes[-1])
1329 def nextblock(lines, i):
1331 dots = re.compile(
r"^\.+$")
1332 stars = re.compile(
r"^\*+$")
1336 and not dots.match(lines[i + delta_i][1:-1])
1337 and not stars.match(lines[i + delta_i])
1342 if i < (count - 3)
and lines[i].startswith(
"*Tree"):
1343 i_nextblock = nextblock(lines, i)
1344 result = parseblock(lines[i:i_nextblock])
1345 result[
"Branches"] = {}
1347 while i < (count - 3)
and lines[i].startswith(
"*Br"):
1348 if i < (count - 2)
and lines[i].startswith(
"*Branch "):
1352 i_nextblock = nextblock(lines, i)
1353 if i_nextblock >= count:
1355 branch = parseblock(lines[i:i_nextblock])
1356 result[
"Branches"][branch[
"Name"]] = branch
1364 Extract the histograms infos from the lines starting at pos.
1365 Returns the position of the first line after the summary block.
1368 h_table_head = re.compile(
1369 r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1371 h_short_summ = re.compile(
r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1376 m = h_count_re.search(lines[pos])
1377 name = m.group(1).strip()
1378 total = int(m.group(2))
1380 for k, v
in [x.split(
"=")
for x
in m.group(3).split()]:
1383 header[
"Total"] = total
1387 m = h_table_head.search(lines[pos])
1390 t = t.replace(
" profile",
"Prof")
1397 if l.startswith(
" | ID"):
1399 titles = [x.strip()
for x
in l.split(
"|")][1:]
1401 while pos < nlines
and lines[pos].startswith(
" |"):
1403 values = [x.strip()
for x
in l.split(
"|")][1:]
1405 for i
in range(len(titles)):
1406 hcont[titles[i]] = values[i]
1407 cont[hcont[
"ID"]] = hcont
1409 elif l.startswith(
" ID="):
1410 while pos < nlines
and lines[pos].startswith(
" ID="):
1412 x.strip()
for x
in h_short_summ.search(lines[pos]).groups()
1414 cont[values[0]] = values
1417 raise RuntimeError(
"Cannot understand line %d: '%s'" % (pos, l))
1421 summ[d][
"header"] = header
1426 summ[name] = {
"header": header}
1432 Scan stdout to find ROOT TTree summaries and digest them.
1434 outlines = stdout.splitlines()
1435 nlines = len(outlines) - 1
1443 match = h_count_re.search(outlines[pos])
1444 while pos < nlines
and not match:
1446 match = h_count_re.search(outlines[pos])
1449 summaries.update(summ)
1455 Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1459 if "BINARY_TAG" in os.environ:
1460 arch = os.environ[
"BINARY_TAG"]
1461 elif "CMTCONFIG" in os.environ:
1462 arch = os.environ[
"CMTCONFIG"]
1463 elif "SCRAM_ARCH" in os.environ:
1464 arch = os.environ[
"SCRAM_ARCH"]
1465 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1471 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1483 Return True if the current platform is Windows.
1485 This function was needed because of the change in the CMTCONFIG format,
1486 from win32_vc71_dbg to i686-winxp-vc9-dbg.
1489 return "winxp" in platform
or platform.startswith(
"win")
1494 """Validate JSON output.
1495 returns -- A list of strings giving causes of failure."""
1499 with open(ref)
as f:
1500 expected = json.load(f)
1501 except json.JSONDecodeError
as err:
1502 causes.append(
"json parser error")
1503 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1508 causes.append(
"json content")
1509 result[
"json_diff"] =
"detailed diff was turned off"
1517 expected = sorted(expected, key=
lambda item: (item[
"component"], item[
"name"]))
1518 out = sorted(out, key=
lambda item: (item[
"component"], item[
"name"]))
1520 t.assertEqual(expected, out)
1521 except AssertionError
as err:
1522 causes.append(
"json content")
1523 result[
"json_diff"] = str(err).splitlines()[0]