22 from subprocess
import PIPE, STDOUT, Popen
23 from unittest
import TestCase
26 from html
import escape
as escape_for_html
28 from cgi
import escape
as escape_for_html
32 if sys.version_info < (3, 5):
35 from codecs
import backslashreplace_errors, register_error
38 if isinstance(exc, UnicodeDecodeError):
39 code =
hex(ord(exc.object[exc.start]))
40 return (
"\\" + code[1:], exc.start + 1)
42 return backslashreplace_errors(exc)
44 register_error(
"backslashreplace", _new_backslashreplace_errors)
46 del backslashreplace_errors
47 del _new_backslashreplace_errors
54 Take a string with invalid ASCII/UTF characters and quote them so that the
55 string can be used in an XML text.
57 >>> sanitize_for_xml('this is \x1b')
58 'this is [NON-XML-CHAR-0x1B]'
60 bad_chars = re.compile(
"[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")
64 return "".join(
"[NON-XML-CHAR-0x%2X]" % ord(c)
for c
in match.group())
66 return bad_chars.sub(quote, data)
70 """helper to debug GAUDI-1084, dump the list of processes"""
71 from getpass
import getuser
73 if "WORKSPACE" in os.environ:
74 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
75 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
76 f.write(p.communicate()[0])
81 Send a signal to a process and all its child processes (starting from the
84 log = logging.getLogger(
"kill_tree")
85 ps_cmd = [
"ps",
"--no-headers",
"-o",
"pid",
"--ppid", str(ppid)]
88 get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
89 children =
map(int, get_children.communicate()[0].split())
90 for child
in children:
93 log.debug(
"killing process %d", ppid)
95 except OSError
as err:
98 log.debug(
"no such process %d", ppid)
106 _common_tmpdir =
None
136 logging.debug(
"running test %s", self.
name)
147 "TIMEOUT_DETAIL":
None,
153 r"from\s+Gaudi.Configuration\s+import\s+\*|"
154 r"from\s+Configurables\s+import",
157 suffix, lang =
".py",
"python"
159 suffix, lang =
".opts",
"c++"
160 self.
result[
"Options"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
161 lang, escape_for_html(self.
options)
163 optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
164 optionFile.file.write(self.
options.encode(
"utf-8"))
171 or platform.platform()
178 if re.search(prex, platform_id)
189 workdir = tempfile.mkdtemp()
200 prog_ext = os.path.splitext(prog)[1]
201 if prog_ext
not in [
".exe",
".py",
".bat"]:
205 prog =
which(prog)
or prog
207 args = list(
map(RationalizePath, self.
args))
209 if prog_ext ==
".py":
220 logging.debug(
"executing %r in %s", params, workdir)
222 params, stdout=PIPE, stderr=PIPE, env=self.
environment
224 logging.debug(
"(pid: %d)", self.
proc.pid)
225 out, err = self.
proc.communicate()
226 self.
out = out.decode(
"utf-8", errors=
"backslashreplace")
227 self.
err = err.decode(
"utf-8", errors=
"backslashreplace")
229 thread = threading.Thread(target=target)
234 if thread.is_alive():
235 logging.debug(
"time out in test %s (pid %d)", self.
name, self.
proc.pid)
242 "--eval-command=thread apply all backtrace",
244 gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
246 "utf-8", errors=
"backslashreplace"
251 if thread.is_alive():
253 self.
causes.append(
"timeout")
258 f
"completed test {self.name} with returncode = {self.returnedCode}"
260 logging.debug(
"validating test...")
261 val_start_time = time.perf_counter()
265 self.
validate_time = round(time.perf_counter() - val_start_time, 2)
267 logging.debug(f
"skipped test {self.name}")
272 shutil.rmtree(workdir,
True)
276 if self.
status !=
"skipped":
278 if self.
signal is not None:
280 self.
causes.append(
"exit code")
284 self.
causes.append(
"exit code")
287 self.
causes.append(
"exit code")
297 logging.debug(
"%s: %s", self.
name, self.
status)
299 "Exit Code":
"returnedCode",
302 "Runtime Environment":
"environment",
305 "Program Name":
"program",
307 "Validator":
"validator",
308 "Validation execution time":
"validate_time",
309 "Output Reference File":
"reference",
310 "Error Reference File":
"error_reference",
313 "Unsupported Platforms":
"unsupported_platforms",
314 "Stack Trace":
"stack_trace",
317 (key, getattr(self, attr))
318 for key, attr
in field_mapping.items()
319 if getattr(self, attr)
328 resultDict.extend(self.
result.annotations.items())
330 resultDict = dict(resultDict)
333 if "Validator" in resultDict:
334 resultDict[
"Validator"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
335 "python", escape_for_html(resultDict[
"Validator"])
346 elif stderr.strip() != self.
stderr.strip():
347 self.
causes.append(
"standard error")
348 return result, self.
causes
361 Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
364 if reference
is None:
373 reflines = list(filter(
None,
map(
lambda s: s.rstrip(), reference.splitlines())))
375 raise RuntimeError(
"Empty (or null) reference")
377 outlines = list(filter(
None,
map(
lambda s: s.rstrip(), stdout.splitlines())))
379 res_field =
"GaudiTest.RefBlock"
381 res_field +=
"_%s" % id
383 if signature
is None:
384 if signature_offset < 0:
385 signature_offset = len(reference) + signature_offset
386 signature = reflines[signature_offset]
389 pos = outlines.index(signature)
391 pos - signature_offset : pos + len(reflines) - signature_offset
393 if reflines != outlines:
394 msg =
"standard output"
397 if msg
not in causes:
399 result[res_field +
".observed"] = result.Quote(
"\n".join(outlines))
401 causes.append(
"missing signature")
402 result[res_field +
".signature"] = result.Quote(signature)
403 if len(reflines) > 1
or signature != reflines[0]:
404 result[res_field +
".expected"] = result.Quote(
"\n".join(reflines))
408 self, expected={"ERROR": 0,
"FATAL": 0}, stdout=
None, result=
None, causes=
None
411 Count the number of messages with required severity (by default ERROR and FATAL)
412 and check if their numbers match the expected ones (0 by default).
413 The dictionary "expected" can be used to tune the number of errors and fatals
414 allowed, or to limit the number of expected warnings etc.
429 outlines = stdout.splitlines()
430 from math
import log10
432 fmt =
"%%%dd - %%s" % (int(log10(len(outlines) + 1)))
438 if len(words) >= 2
and words[1]
in errors:
439 errors[words[1]].append(fmt % (linecount, l.rstrip()))
442 if len(errors[e]) != expected[e]:
443 causes.append(
"%s(%d)" % (e, len(errors[e])))
444 result[
"GaudiTest.lines.%s" % e] = result.Quote(
"\n".join(errors[e]))
445 result[
"GaudiTest.lines.%s.expected#" % e] = result.Quote(
457 ignore=r"Basket|.*size|Compression",
460 Compare the TTree summaries in stdout with the ones in trees_dict or in
461 the reference file. By default ignore the size, compression and basket
463 The presence of TTree summaries when none is expected is not a failure.
471 if trees_dict
is None:
474 if lreference
and os.path.isfile(lreference):
479 from pprint
import PrettyPrinter
483 result[
"GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
485 result[
"GaudiTest.TTrees.ignore"] = result.Quote(ignore)
490 causes.append(
"trees summaries")
492 result[
"GaudiTest.TTrees.failure_on"] = result.Quote(msg)
493 result[
"GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
498 self, stdout=None, result=None, causes=None, dict=None, ignore=None
501 Compare the TTree summaries in stdout with the ones in trees_dict or in
502 the reference file. By default ignore the size, compression and basket
504 The presence of TTree summaries when none is expected is not a failure.
516 if lreference
and os.path.isfile(lreference):
521 from pprint
import PrettyPrinter
525 result[
"GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
527 result[
"GaudiTest.Histos.ignore"] = result.Quote(ignore)
532 causes.append(
"histos summaries")
534 result[
"GaudiTest.Histos.failure_on"] = result.Quote(msg)
535 result[
"GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
540 self, stdout=None, stderr=None, result=None, causes=None, preproc=None
543 Default validation action: compare standard output and error to the
558 preproc = normalizeExamples
562 if lreference
and os.path.isfile(lreference):
564 lreference,
"standard output",
"Output Diff", preproc=preproc
567 causes += [
"missing reference file"]
571 if causes
and lreference:
574 newrefname =
".".join([lreference,
"new"])
575 while os.path.exists(newrefname):
577 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
578 newref = open(newrefname,
"w")
580 for l
in stdout.splitlines():
581 newref.write(l.rstrip() +
"\n")
583 result[
"New Output Reference File"] = os.path.relpath(
595 if os.path.isfile(lreference):
597 lreference,
"standard error",
"Error Diff", preproc=preproc
600 newcauses = [
"missing error reference file"]
602 if newcauses
and lreference:
604 newrefname =
".".join([lreference,
"new"])
605 while os.path.exists(newrefname):
607 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
608 newref = open(newrefname,
"w")
610 for l
in stderr.splitlines():
611 newref.write(l.rstrip() +
"\n")
613 result[
"New Error Reference File"] = os.path.relpath(
618 lreference,
"standard error",
"ExecTest.expected_stderr"
631 JSON validation action: compare json file to reference file
639 if not os.path.isfile(output_file):
640 causes.append(f
"output file {output_file} does not exist")
644 with open(output_file)
as f:
645 output = json.load(f)
646 except json.JSONDecodeError
as err:
647 causes.append(
"json parser error")
648 result[
"output_parse_error"] = f
"json parser error in {output_file}: {err}"
653 causes.append(
"reference file not set")
654 elif not os.path.isfile(lreference):
655 causes.append(
"reference file does not exist")
658 if causes
and lreference:
661 newrefname =
".".join([lreference,
"new"])
662 while os.path.exists(newrefname):
664 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
665 with open(newrefname,
"w")
as newref:
666 json.dump(output, newref, indent=4)
667 result[
"New JSON Output Reference File"] = os.path.relpath(
def platformSplit(p):
    """Break a platform id string into its set of component tags.

    Components may be separated by '-' or '+'
    (e.g. "x86_64-slc6-gcc48-opt" -> {"x86_64", "slc6", "gcc48", "opt"}).
    """
    separator = re.compile(r"[-+]")
    return set(separator.split(p))
687 reference = os.path.normpath(
688 os.path.join(self.
basedir, os.path.expandvars(reffile))
692 spec_ref = reference[:-3] +
GetPlatform(self)[0:3] + reference[-3:]
693 if os.path.isfile(spec_ref):
697 dirname, basename = os.path.split(reference)
700 head = basename +
"."
703 if "do0" in platform:
706 for f
in os.listdir(dirname):
707 if f.startswith(head):
708 req_plat = platformSplit(f[head_len:])
709 if platform.issuperset(req_plat):
710 candidates.append((len(req_plat), f))
715 reference = os.path.join(dirname, candidates[-1][1])
725 from GaudiKernel
import ROOT6WorkAroundEnabled
738 Function used to normalize the used path
740 newPath = os.path.normpath(os.path.expandvars(p))
741 if os.path.exists(newPath):
742 p = os.path.realpath(newPath)
748 Locates an executable in the executables path ($PATH) and returns the full
749 path to it. An application is looked for with or without the '.exe' suffix.
750 If the executable cannot be found, None is returned
752 if os.path.isabs(executable):
753 if not os.path.isfile(executable):
754 if executable.endswith(
".exe"):
755 if os.path.isfile(executable[:-4]):
756 return executable[:-4]
758 executable = os.path.split(executable)[1]
761 for d
in os.environ.get(
"PATH").split(os.pathsep):
762 fullpath = os.path.join(d, executable)
763 if os.path.isfile(fullpath):
765 elif executable.endswith(
".exe")
and os.path.isfile(fullpath[:-4]):
780 UNTESTED =
"UNTESTED"
790 def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
794 assert isinstance(key, six.string_types)
798 assert isinstance(key, six.string_types)
799 assert isinstance(value, six.string_types),
"{!r} is not a string".
format(value)
804 Convert text to html by escaping special chars and adding <pre> tags.
806 return "<pre>{}</pre>".
format(escape_for_html(text))
825 """Validate the output of the program.
826 'stdout' -- A string containing the data written to the standard output
828 'stderr' -- A string containing the data written to the standard error
830 'result' -- A 'Result' object. It may be used to annotate
831 the outcome according to the content of stderr.
832 returns -- A list of strings giving causes of failure."""
837 causes.append(self.
cause)
843 """Compare 's1' and 's2', ignoring line endings.
846 returns -- True if 's1' and 's2' are the same, ignoring
847 differences in line endings."""
851 to_ignore = re.compile(
852 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
856 return not to_ignore.match(l)
858 return list(filter(keep_line, s1.splitlines())) == list(
859 filter(keep_line, s2.splitlines())
862 return s1.splitlines() == s2.splitlines()
867 """Base class for a callable that takes a file and returns a modified
882 if not isinstance(input, six.string_types):
886 lines = input.splitlines()
890 output =
"\n".join(output)
920 if line.find(s) >= 0:
935 if self.
start in line:
938 elif self.
end in line:
948 when = re.compile(when)
952 if isinstance(rhs, RegexpReplacer):
954 res._operations = self.
_operations + rhs._operations
956 res = FilePreprocessor.__add__(self, rhs)
961 if w
is None or w.search(line):
962 line = o.sub(r, line)
969 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
970 "00:00:00 1970-01-01",
def _ensure_single_trailing_newline(line):
    # Drop any trailing whitespace/EOL characters, then terminate the
    # line with exactly one '\n'.
    return str(line).rstrip() + "\n"


normalizeEOL.__processLine__ = _ensure_single_trailing_newline
def _drop_blank_lines(line):
    # Keep lines with visible content unchanged; map blank or
    # whitespace-only lines to None so the preprocessor discards them.
    return line if line.strip() else None


skipEmptyLines.__processLine__ = _drop_blank_lines
991 line = line[: (pos + self.
siglen)]
992 lst = line[(pos + self.
siglen) :].split()
994 line +=
" ".join(lst)
1000 Sort group of lines matching a regular expression
1004 self.
exp = exp
if hasattr(exp,
"match")
else re.compile(exp)
1007 match = self.
exp.match
1016 output.extend(group)
1023 normalizeExamples = maskPointers + normalizeDate
1026 (
"TIMER.TIMER",
r"\s+[+-]?[0-9]+[0-9.]*",
" 0"),
1027 (
"release all pending",
r"^.*/([^/]*:.*)",
r"\1"),
1028 (
"^#.*file",
r"file '.*[/\\]([^/\\]*)$",
r"file '\1"),
1030 "^JobOptionsSvc.*options successfully read in from",
1031 r"read in from .*[/\\]([^/\\]*)$",
1037 r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1038 "00000000-0000-0000-0000-000000000000",
1042 "ServiceLocatorHelper::",
1043 "ServiceLocatorHelper::(create|locate)Service",
1044 "ServiceLocatorHelper::service",
1047 (
None,
r"e([-+])0([0-9][0-9])",
r"e\1\2"),
1049 (
None,
r"Service reference count check:",
r"Looping over all active services..."),
1053 r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1056 (
"ApplicationMgr",
r"(declareMultiSvcType|addMultiSvc): ",
""),
1057 (
r"Property 'Name': Value",
r"( = '[^']+':)'(.*)'",
r"\1\2"),
1058 (
"TimelineSvc",
"to file 'TimelineFile':",
"to file "),
1059 (
"DataObjectHandleBase",
r'DataObjectHandleBase\("([^"]*)"\)',
r"'\1'"),
1066 "JobOptionsSvc INFO # ",
1067 "JobOptionsSvc WARNING # ",
1070 "This machine has a speed",
1072 "ToolSvc.Sequenc... INFO",
1073 "DataListenerSvc INFO XML written to file:",
1076 "DEBUG No writable file catalog found which contains FID:",
1077 "DEBUG Service base class initialized successfully",
1079 "DEBUG Incident timing:",
1083 "INFO 'CnvServices':[",
1085 "DEBUG 'CnvServices':[",
1090 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1092 "mismatching case for property name:",
1094 "Histograms saving not required.",
1096 "Properties are dumped into",
1098 "WARNING no ROOT output file name",
1099 "INFO Writing ROOT histograms to:",
1100 "INFO Completed update of ROOT histograms in:",
1103 "data dependencies:",
1106 r"^JobOptionsSvc INFO *$",
1109 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1110 r"File '.*.xml' does not exist",
1111 r"INFO Refer to dataset .* by its file ID:",
1112 r"INFO Referring to dataset .* by its file ID:",
1113 r"INFO Disconnect from dataset",
1114 r"INFO Disconnected from dataset",
1115 r"INFO Disconnected data IO:",
1116 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1118 r".*StatusCodeSvc.*",
1119 r".*StatusCodeCheck.*",
1120 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1123 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1125 r"^ +[0-9]+ \|.*ROOT",
1126 r"^ +[0-9]+ \|.*\|.*Dict",
1128 r"EventLoopMgr.*---> Loop Finished",
1129 r"HiveSlimEventLo.*---> Loop Finished",
1134 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1138 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1139 r"Property(.*)'Audit(Begin|End)Run':",
1141 r"Property(.*)'AuditRe(start|initialize)':",
1142 r"Property(.*)'Blocking':",
1144 r"Property(.*)'ErrorCount(er)?':",
1146 r"Property(.*)'Sequential':",
1148 r"Property(.*)'FilterCircularDependencies':",
1150 r"Property(.*)'IsClonable':",
1152 r"Property update for OutputLevel : new value =",
1153 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1162 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1166 normalizeExamples = (
1179 def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1187 if os.path.isfile(self.
reffile):
1188 orig = open(self.
reffile).readlines()
1191 result[self.
result_key +
".preproc.orig"] = result.Quote(
1192 "\n".join(
map(str.strip, orig))
1196 new = stdout.splitlines()
1201 difflib.unified_diff(
1202 orig, new, n=1, fromfile=
"Reference file", tofile=
"Actual output"
1206 result[self.
result_key] = result.Quote(
"".join(filterdiffs))
1207 result[self.
result_key +
".preproc.new"] = result.Quote(
1208 "\n".join(
map(str.strip, new))
1210 causes.append(self.
cause)
1216 Scan stdout to find ROOT TTree summaries and digest them.
1218 stars = re.compile(
r"^\*+$")
1219 outlines = stdout.splitlines()
1220 nlines = len(outlines)
1226 while i < nlines
and not stars.match(outlines[i]):
1231 trees[tree[
"Name"]] = tree
1238 Check that all the keys in reference are in to_check too, with the same value.
1239 If the value is a dict, the function is called recursively. to_check can
1240 contain more keys than reference, that will not be tested.
1241 The function returns at the first difference found.
1246 ignore_re = re.compile(ignore)
1247 keys = [key
for key
in reference
if not ignore_re.match(key)]
1249 keys = reference.keys()
1253 if (
type(reference[k])
is dict)
and (
type(to_check[k])
is dict):
1256 failed = fail_keys =
cmpTreesDicts(reference[k], to_check[k], ignore)
1259 failed = to_check[k] != reference[k]
1264 fail_keys.insert(0, k)
1275 if c
is None or r
is None:
1277 return (fail_path, r, c)
# Matches histogram booking summary lines such as
# "HistogramSvc  SUCCESS Booked 12 Histogram(s) : 1D=10 2D=2".
# Captures: (1) leading text/component name, (2) histogram count,
# (3) the per-type "key=value" breakdown.
h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)"
)
1286 Parse the TTree summary table in lines, starting from pos.
1287 Returns a tuple with the dictionary with the digested information and the
1288 position of the first line after the summary.
1295 return [f.strip()
for f
in l.strip(
"*\n").split(
":", 2)]
1299 cols = splitcols(ll[0])
1300 r[
"Name"], r[
"Title"] = cols[1:]
1302 cols = splitcols(ll[1])
1303 r[
"Entries"] = int(cols[1])
1305 sizes = cols[2].split()
1306 r[
"Total size"] = int(sizes[2])
1307 if sizes[-1] ==
"memory":
1310 r[
"File size"] = int(sizes[-1])
1312 cols = splitcols(ll[2])
1313 sizes = cols[2].split()
1314 if cols[0] ==
"Baskets":
1315 r[
"Baskets"] = int(cols[1])
1316 r[
"Basket size"] = int(sizes[2])
1317 r[
"Compression"] = float(sizes[-1])
1320 if i < (count - 3)
and lines[i].startswith(
"*Tree"):
1321 result = parseblock(lines[i : i + 3])
1322 result[
"Branches"] = {}
1324 while i < (count - 3)
and lines[i].startswith(
"*Br"):
1325 if i < (count - 2)
and lines[i].startswith(
"*Branch "):
1329 branch = parseblock(lines[i : i + 3])
1330 result[
"Branches"][branch[
"Name"]] = branch
1338 Extract the histograms infos from the lines starting at pos.
1339 Returns the position of the first line after the summary block.
1342 h_table_head = re.compile(
1343 r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1345 h_short_summ = re.compile(
r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1350 m = h_count_re.search(lines[pos])
1351 name = m.group(1).strip()
1352 total = int(m.group(2))
1354 for k, v
in [x.split(
"=")
for x
in m.group(3).split()]:
1357 header[
"Total"] = total
1361 m = h_table_head.search(lines[pos])
1364 t = t.replace(
" profile",
"Prof")
1371 if l.startswith(
" | ID"):
1373 titles = [x.strip()
for x
in l.split(
"|")][1:]
1375 while pos < nlines
and lines[pos].startswith(
" |"):
1377 values = [x.strip()
for x
in l.split(
"|")][1:]
1379 for i
in range(len(titles)):
1380 hcont[titles[i]] = values[i]
1381 cont[hcont[
"ID"]] = hcont
1383 elif l.startswith(
" ID="):
1384 while pos < nlines
and lines[pos].startswith(
" ID="):
1386 x.strip()
for x
in h_short_summ.search(lines[pos]).groups()
1388 cont[values[0]] = values
1391 raise RuntimeError(
"Cannot understand line %d: '%s'" % (pos, l))
1395 summ[d][
"header"] = header
1400 summ[name] = {
"header": header}
1406 Scan stdout to find ROOT TTree summaries and digest them.
1408 outlines = stdout.splitlines()
1409 nlines = len(outlines) - 1
1417 match = h_count_re.search(outlines[pos])
1418 while pos < nlines
and not match:
1420 match = h_count_re.search(outlines[pos])
1423 summaries.update(summ)
1429 Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1433 if "BINARY_TAG" in os.environ:
1434 arch = os.environ[
"BINARY_TAG"]
1435 elif "CMTCONFIG" in os.environ:
1436 arch = os.environ[
"CMTCONFIG"]
1437 elif "SCRAM_ARCH" in os.environ:
1438 arch = os.environ[
"SCRAM_ARCH"]
1439 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1445 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1457 Return True if the current platform is Windows.
1459 This function was needed because of the change in the CMTCONFIG format,
1460 from win32_vc71_dbg to i686-winxp-vc9-dbg.
1463 return "winxp" in platform
or platform.startswith(
"win")
1468 """Validate JSON output.
1469 returns -- A list of strings giving causes of failure."""
1473 with open(ref)
as f:
1474 expected = json.load(f)
1475 except json.JSONDecodeError
as err:
1476 causes.append(
"json parser error")
1477 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1482 causes.append(
"json content")
1483 result[
"json_diff"] =
"detailed diff was turned off"
1489 t.assertEqual(expected, out)
1490 except AssertionError
as err:
1491 causes.append(
"json content")
1492 result[
"json_diff"] = str(err).splitlines()[0]