23 from subprocess
import PIPE, STDOUT, Popen
24 from unittest
import TestCase
27 from html
import escape
as escape_for_html
29 from cgi
import escape
as escape_for_html
33 if sys.version_info < (3, 5):
36 from codecs
import backslashreplace_errors, register_error
39 if isinstance(exc, UnicodeDecodeError):
40 code =
hex(ord(exc.object[exc.start]))
41 return (
"\\" + code[1:], exc.start + 1)
43 return backslashreplace_errors(exc)
45 register_error(
"backslashreplace", _new_backslashreplace_errors)
47 del backslashreplace_errors
48 del _new_backslashreplace_errors
55 Take a string with invalid ASCII/UTF characters and quote them so that the
56 string can be used in an XML text.
58 >>> sanitize_for_xml('this is \x1b')
59 'this is [NON-XML-CHAR-0x1B]'
61 bad_chars = re.compile(
"[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")
65 return "".join(
"[NON-XML-CHAR-0x%2X]" % ord(c)
for c
in match.group())
67 return bad_chars.sub(quote, data)
71 """helper to debug GAUDI-1084, dump the list of processes"""
72 from getpass
import getuser
74 if "WORKSPACE" in os.environ:
75 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
76 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
77 f.write(p.communicate()[0])
82 Send a signal to a process and all its child processes (starting from the
85 log = logging.getLogger(
"kill_tree")
86 ps_cmd = [
"ps",
"--no-headers",
"-o",
"pid",
"--ppid", str(ppid)]
89 get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
90 children =
map(int, get_children.communicate()[0].split())
91 for child
in children:
94 log.debug(
"killing process %d", ppid)
96 except OSError
as err:
99 log.debug(
"no such process %d", ppid)
107 _common_tmpdir =
None
137 logging.debug(
"running test %s", self.
name)
148 "TIMEOUT_DETAIL":
None,
154 r"from\s+Gaudi.Configuration\s+import\s+\*|"
155 "from\s+Configurables\s+import",
158 suffix, lang =
".py",
"python"
160 suffix, lang =
".opts",
"c++"
161 self.
result[
"Options"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
162 lang, escape_for_html(self.
options)
164 optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
165 optionFile.file.write(self.
options.encode(
"utf-8"))
172 or platform.platform()
179 if re.search(prex, platform_id)
190 workdir = tempfile.mkdtemp()
201 prog_ext = os.path.splitext(prog)[1]
202 if prog_ext
not in [
".exe",
".py",
".bat"]:
206 prog =
which(prog)
or prog
208 args = list(
map(RationalizePath, self.
args))
210 if prog_ext ==
".py":
221 logging.debug(
"executing %r in %s", params, workdir)
223 params, stdout=PIPE, stderr=PIPE, env=self.
environment
225 logging.debug(
"(pid: %d)", self.
proc.pid)
226 out, err = self.
proc.communicate()
227 self.
out = out.decode(
"utf-8", errors=
"backslashreplace")
228 self.
err = err.decode(
"utf-8", errors=
"backslashreplace")
230 thread = threading.Thread(target=target)
235 if thread.is_alive():
236 logging.debug(
"time out in test %s (pid %d)", self.
name, self.
proc.pid)
243 "--eval-command=thread apply all backtrace",
245 gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
247 "utf-8", errors=
"backslashreplace"
252 if thread.is_alive():
254 self.
causes.append(
"timeout")
259 f
"completed test {self.name} with returncode = {self.returnedCode}"
261 logging.debug(
"validating test...")
262 val_start_time = time.perf_counter()
266 self.
validate_time = round(time.perf_counter() - val_start_time, 2)
268 logging.debug(f
"skipped test {self.name}")
273 shutil.rmtree(workdir,
True)
277 if self.
status !=
"skipped":
279 if self.
signal is not None:
281 self.
causes.append(
"exit code")
285 self.
causes.append(
"exit code")
288 self.
causes.append(
"exit code")
298 logging.debug(
"%s: %s", self.
name, self.
status)
300 "Exit Code":
"returnedCode",
303 "Runtime Environment":
"environment",
306 "Program Name":
"program",
308 "Validator":
"validator",
309 "Validation execution time":
"validate_time",
310 "Output Reference File":
"reference",
311 "Error Reference File":
"error_reference",
314 "Unsupported Platforms":
"unsupported_platforms",
315 "Stack Trace":
"stack_trace",
318 (key, getattr(self, attr))
319 for key, attr
in field_mapping.items()
320 if getattr(self, attr)
329 resultDict.extend(self.
result.annotations.items())
331 resultDict = dict(resultDict)
334 if "Validator" in resultDict:
335 resultDict[
"Validator"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
336 "python", escape_for_html(resultDict[
"Validator"])
347 elif stderr.strip() != self.
stderr.strip():
348 self.
causes.append(
"standard error")
349 return result, self.
causes
362 Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allows one to distinguish between different calls to this function in the same validation code.
365 if reference
is None:
374 reflines = list(filter(
None,
map(
lambda s: s.rstrip(), reference.splitlines())))
376 raise RuntimeError(
"Empty (or null) reference")
378 outlines = list(filter(
None,
map(
lambda s: s.rstrip(), stdout.splitlines())))
380 res_field =
"GaudiTest.RefBlock"
382 res_field +=
"_%s" % id
384 if signature
is None:
385 if signature_offset < 0:
386 signature_offset = len(reference) + signature_offset
387 signature = reflines[signature_offset]
390 pos = outlines.index(signature)
392 pos - signature_offset : pos + len(reflines) - signature_offset
394 if reflines != outlines:
395 msg =
"standard output"
398 if not msg
in causes:
400 result[res_field +
".observed"] = result.Quote(
"\n".join(outlines))
402 causes.append(
"missing signature")
403 result[res_field +
".signature"] = result.Quote(signature)
404 if len(reflines) > 1
or signature != reflines[0]:
405 result[res_field +
".expected"] = result.Quote(
"\n".join(reflines))
409 self, expected={"ERROR": 0,
"FATAL": 0}, stdout=
None, result=
None, causes=
None
412 Count the number of messages with required severity (by default ERROR and FATAL)
413 and check if their numbers match the expected ones (0 by default).
414 The dictionary "expected" can be used to tune the number of errors and fatals
415 allowed, or to limit the number of expected warnings etc.
430 outlines = stdout.splitlines()
431 from math
import log10
433 fmt =
"%%%dd - %%s" % (int(log10(len(outlines) + 1)))
439 if len(words) >= 2
and words[1]
in errors:
440 errors[words[1]].append(fmt % (linecount, l.rstrip()))
443 if len(errors[e]) != expected[e]:
444 causes.append(
"%s(%d)" % (e, len(errors[e])))
445 result[
"GaudiTest.lines.%s" % e] = result.Quote(
"\n".join(errors[e]))
446 result[
"GaudiTest.lines.%s.expected#" % e] = result.Quote(
458 ignore=r"Basket|.*size|Compression",
461 Compare the TTree summaries in stdout with the ones in trees_dict or in
462 the reference file. By default ignore the size, compression and basket
464 The presence of TTree summaries when none is expected is not a failure.
472 if trees_dict
is None:
475 if lreference
and os.path.isfile(lreference):
480 from pprint
import PrettyPrinter
484 result[
"GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
486 result[
"GaudiTest.TTrees.ignore"] = result.Quote(ignore)
491 causes.append(
"trees summaries")
493 result[
"GaudiTest.TTrees.failure_on"] = result.Quote(msg)
494 result[
"GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
499 self, stdout=None, result=None, causes=None, dict=None, ignore=None
502 Compare the TTree summaries in stdout with the ones in trees_dict or in
503 the reference file. By default ignore the size, compression and basket
505 The presence of TTree summaries when none is expected is not a failure.
517 if lreference
and os.path.isfile(lreference):
522 from pprint
import PrettyPrinter
526 result[
"GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
528 result[
"GaudiTest.Histos.ignore"] = result.Quote(ignore)
533 causes.append(
"histos summaries")
535 result[
"GaudiTest.Histos.failure_on"] = result.Quote(msg)
536 result[
"GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
541 self, stdout=None, stderr=None, result=None, causes=None, preproc=None
544 Default validation action: compare standard output and error to the
559 preproc = normalizeExamples
563 if lreference
and os.path.isfile(lreference):
565 lreference,
"standard output",
"Output Diff", preproc=preproc
568 causes += [
"missing reference file"]
572 if causes
and lreference:
575 newrefname =
".".join([lreference,
"new"])
576 while os.path.exists(newrefname):
578 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
579 newref = open(newrefname,
"w")
581 for l
in stdout.splitlines():
582 newref.write(l.rstrip() +
"\n")
584 result[
"New Output Reference File"] = os.path.relpath(
596 if os.path.isfile(lreference):
598 lreference,
"standard error",
"Error Diff", preproc=preproc
601 newcauses = [
"missing error reference file"]
603 if newcauses
and lreference:
605 newrefname =
".".join([lreference,
"new"])
606 while os.path.exists(newrefname):
608 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
609 newref = open(newrefname,
"w")
611 for l
in stderr.splitlines():
612 newref.write(l.rstrip() +
"\n")
614 result[
"New Error Reference File"] = os.path.relpath(
619 lreference,
"standard error",
"ExecTest.expected_stderr"
632 JSON validation action: compare json file to reference file
640 if not os.path.isfile(output_file):
641 causes.append(f
"output file {output_file} does not exist")
645 with open(output_file)
as f:
646 output = json.load(f)
647 except json.JSONDecodeError
as err:
648 causes.append(
"json parser error")
649 result[
"output_parse_error"] = f
"json parser error in {output_file}: {err}"
654 causes.append(
"reference file not set")
655 elif not os.path.isfile(lreference):
656 causes.append(
"reference file does not exist")
659 if causes
and lreference:
662 newrefname =
".".join([lreference,
"new"])
663 while os.path.exists(newrefname):
665 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
666 with open(newrefname,
"w")
as newref:
667 json.dump(output, newref, indent=4)
668 result[
"New JSON Output Reference File"] = os.path.relpath(
685 platformSplit =
lambda p: set(re.split(
r"[-+]", p))
687 reference = os.path.normpath(
688 os.path.join(self.
basedir, os.path.expandvars(reffile))
692 spec_ref = reference[:-3] +
GetPlatform(self)[0:3] + reference[-3:]
693 if os.path.isfile(spec_ref):
697 dirname, basename = os.path.split(reference)
700 head = basename +
"."
703 if "do0" in platform:
706 for f
in os.listdir(dirname):
707 if f.startswith(head):
708 req_plat = platformSplit(f[head_len:])
709 if platform.issuperset(req_plat):
710 candidates.append((len(req_plat), f))
715 reference = os.path.join(dirname, candidates[-1][1])
727 from GaudiKernel
import ROOT6WorkAroundEnabled
740 Function used to normalize the used path
742 newPath = os.path.normpath(os.path.expandvars(p))
743 if os.path.exists(newPath):
744 p = os.path.realpath(newPath)
750 Locates an executable in the executables path ($PATH) and returns the full
751 path to it. An application is looked for with or without the '.exe' suffix.
752 If the executable cannot be found, None is returned
754 if os.path.isabs(executable):
755 if not os.path.isfile(executable):
756 if executable.endswith(
".exe"):
757 if os.path.isfile(executable[:-4]):
758 return executable[:-4]
760 executable = os.path.split(executable)[1]
763 for d
in os.environ.get(
"PATH").split(os.pathsep):
764 fullpath = os.path.join(d, executable)
765 if os.path.isfile(fullpath):
767 elif executable.endswith(
".exe")
and os.path.isfile(fullpath[:-4]):
783 UNTESTED =
"UNTESTED"
793 def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
797 assert isinstance(key, six.string_types)
801 assert isinstance(key, six.string_types)
802 assert isinstance(value, six.string_types),
"{!r} is not a string".
format(value)
807 Convert text to html by escaping special chars and adding <pre> tags.
809 return "<pre>{}</pre>".
format(escape_for_html(text))
828 """Validate the output of the program.
829 'stdout' -- A string containing the data written to the standard output
831 'stderr' -- A string containing the data written to the standard error
833 'result' -- A 'Result' object. It may be used to annotate
834 the outcome according to the content of stderr.
835 returns -- A list of strings giving causes of failure."""
840 causes.append(self.
cause)
846 """Compare 's1' and 's2', ignoring line endings.
849 returns -- True if 's1' and 's2' are the same, ignoring
850 differences in line endings."""
854 to_ignore = re.compile(
855 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
859 return not to_ignore.match(l)
861 return list(filter(keep_line, s1.splitlines())) == list(
862 filter(keep_line, s2.splitlines())
865 return s1.splitlines() == s2.splitlines()
870 """Base class for a callable that takes a file and returns a modified
885 if not isinstance(input, six.string_types):
889 lines = input.splitlines()
893 output =
"\n".join(output)
923 if line.find(s) >= 0:
938 if self.
start in line:
941 elif self.
end in line:
951 when = re.compile(when)
955 if isinstance(rhs, RegexpReplacer):
957 res._operations = self.
_operations + rhs._operations
959 res = FilePreprocessor.__add__(self, rhs)
964 if w
is None or w.search(line):
965 line = o.sub(r, line)
972 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
973 "00:00:00 1970-01-01",
976 normalizeEOL.__processLine__ =
lambda line: str(line).rstrip() +
"\n"
980 skipEmptyLines.__processLine__ =
lambda line: (line.strip()
and line)
or None
994 line = line[: (pos + self.
siglen)]
995 lst = line[(pos + self.
siglen) :].split()
997 line +=
" ".join(lst)
1003 Sort group of lines matching a regular expression
1007 self.
exp = exp
if hasattr(exp,
"match")
else re.compile(exp)
1010 match = self.
exp.match
1019 output.extend(group)
1026 normalizeExamples = maskPointers + normalizeDate
1029 (
"TIMER.TIMER",
r"\s+[+-]?[0-9]+[0-9.]*",
" 0"),
1030 (
"release all pending",
r"^.*/([^/]*:.*)",
r"\1"),
1031 (
"^#.*file",
r"file '.*[/\\]([^/\\]*)$",
r"file '\1"),
1033 "^JobOptionsSvc.*options successfully read in from",
1034 r"read in from .*[/\\]([^/\\]*)$",
1040 r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1041 "00000000-0000-0000-0000-000000000000",
1045 "ServiceLocatorHelper::",
1046 "ServiceLocatorHelper::(create|locate)Service",
1047 "ServiceLocatorHelper::service",
1050 (
None,
r"e([-+])0([0-9][0-9])",
r"e\1\2"),
1052 (
None,
r"Service reference count check:",
r"Looping over all active services..."),
1056 r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1059 (
"ApplicationMgr",
r"(declareMultiSvcType|addMultiSvc): ",
""),
1060 (
r"Property 'Name': Value",
r"( = '[^']+':)'(.*)'",
r"\1\2"),
1061 (
"TimelineSvc",
"to file 'TimelineFile':",
"to file "),
1062 (
"DataObjectHandleBase",
r'DataObjectHandleBase\("([^"]*)"\)',
r"'\1'"),
1069 "JobOptionsSvc INFO # ",
1070 "JobOptionsSvc WARNING # ",
1073 "This machine has a speed",
1075 "ToolSvc.Sequenc... INFO",
1076 "DataListenerSvc INFO XML written to file:",
1079 "DEBUG No writable file catalog found which contains FID:",
1080 "DEBUG Service base class initialized successfully",
1082 "DEBUG Incident timing:",
1086 "INFO 'CnvServices':[",
1088 "DEBUG 'CnvServices':[",
1093 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1095 "mismatching case for property name:",
1097 "Histograms saving not required.",
1099 "Properties are dumped into",
1101 "WARNING no ROOT output file name",
1102 "INFO Writing ROOT histograms to:",
1103 "INFO Completed update of ROOT histograms in:",
1106 r"^JobOptionsSvc INFO *$",
1109 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1110 r"File '.*.xml' does not exist",
1111 r"INFO Refer to dataset .* by its file ID:",
1112 r"INFO Referring to dataset .* by its file ID:",
1113 r"INFO Disconnect from dataset",
1114 r"INFO Disconnected from dataset",
1115 r"INFO Disconnected data IO:",
1116 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1118 r".*StatusCodeSvc.*",
1119 r".*StatusCodeCheck.*",
1120 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1123 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1125 r"^ +[0-9]+ \|.*ROOT",
1126 r"^ +[0-9]+ \|.*\|.*Dict",
1128 r"EventLoopMgr.*---> Loop Finished",
1129 r"HiveSlimEventLo.*---> Loop Finished",
1134 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1138 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1139 r"Property(.*)'Audit(Begin|End)Run':",
1141 r"Property(.*)'AuditRe(start|initialize)':",
1142 r"Property(.*)'Blocking':",
1144 r"Property(.*)'ErrorCount(er)?':",
1146 r"Property(.*)'Sequential':",
1148 r"Property(.*)'FilterCircularDependencies':",
1150 r"Property(.*)'IsClonable':",
1152 r"Property update for OutputLevel : new value =",
1153 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1162 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1166 normalizeExamples = (
1179 def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1187 if os.path.isfile(self.
reffile):
1188 orig = open(self.
reffile).readlines()
1191 result[self.
result_key +
".preproc.orig"] = result.Quote(
1192 "\n".join(
map(str.strip, orig))
1196 new = stdout.splitlines()
1201 difflib.unified_diff(
1202 orig, new, n=1, fromfile=
"Reference file", tofile=
"Actual output"
1206 result[self.
result_key] = result.Quote(
"".join(filterdiffs))
1207 result[self.
result_key +
".preproc.new"] = result.Quote(
1208 "\n".join(
map(str.strip, new))
1210 causes.append(self.
cause)
1216 Scan stdout to find ROOT TTree summaries and digest them.
1218 stars = re.compile(
r"^\*+$")
1219 outlines = stdout.splitlines()
1220 nlines = len(outlines)
1226 while i < nlines
and not stars.match(outlines[i]):
1231 trees[tree[
"Name"]] = tree
1238 Check that all the keys in reference are in to_check too, with the same value.
1239 If the value is a dict, the function is called recursively. to_check can
1240 contain more keys than reference, that will not be tested.
1241 The function returns at the first difference found.
1246 ignore_re = re.compile(ignore)
1247 keys = [key
for key
in reference
if not ignore_re.match(key)]
1249 keys = reference.keys()
1253 if (
type(reference[k])
is dict)
and (
type(to_check[k])
is dict):
1256 failed = fail_keys =
cmpTreesDicts(reference[k], to_check[k], ignore)
1259 failed = to_check[k] != reference[k]
1264 fail_keys.insert(0, k)
1275 if c
is None or r
is None:
1277 return (fail_path, r, c)
1281 h_count_re = re.compile(
r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1286 Parse the TTree summary table in lines, starting from pos.
1287 Returns a tuple with the dictionary with the digested informations and the
1288 position of the first line after the summary.
1295 return [f.strip()
for f
in l.strip(
"*\n").split(
":", 2)]
1299 cols = splitcols(ll[0])
1300 r[
"Name"], r[
"Title"] = cols[1:]
1302 cols = splitcols(ll[1])
1303 r[
"Entries"] = int(cols[1])
1305 sizes = cols[2].split()
1306 r[
"Total size"] = int(sizes[2])
1307 if sizes[-1] ==
"memory":
1310 r[
"File size"] = int(sizes[-1])
1312 cols = splitcols(ll[2])
1313 sizes = cols[2].split()
1314 if cols[0] ==
"Baskets":
1315 r[
"Baskets"] = int(cols[1])
1316 r[
"Basket size"] = int(sizes[2])
1317 r[
"Compression"] = float(sizes[-1])
1320 if i < (count - 3)
and lines[i].startswith(
"*Tree"):
1321 result = parseblock(lines[i : i + 3])
1322 result[
"Branches"] = {}
1324 while i < (count - 3)
and lines[i].startswith(
"*Br"):
1325 if i < (count - 2)
and lines[i].startswith(
"*Branch "):
1329 branch = parseblock(lines[i : i + 3])
1330 result[
"Branches"][branch[
"Name"]] = branch
1338 Extract the histograms infos from the lines starting at pos.
1339 Returns the position of the first line after the summary block.
1342 h_table_head = re.compile(
1343 r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1345 h_short_summ = re.compile(
r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1350 m = h_count_re.search(lines[pos])
1351 name = m.group(1).strip()
1352 total = int(m.group(2))
1354 for k, v
in [x.split(
"=")
for x
in m.group(3).split()]:
1357 header[
"Total"] = total
1361 m = h_table_head.search(lines[pos])
1364 t = t.replace(
" profile",
"Prof")
1371 if l.startswith(
" | ID"):
1373 titles = [x.strip()
for x
in l.split(
"|")][1:]
1375 while pos < nlines
and lines[pos].startswith(
" |"):
1377 values = [x.strip()
for x
in l.split(
"|")][1:]
1379 for i
in range(len(titles)):
1380 hcont[titles[i]] = values[i]
1381 cont[hcont[
"ID"]] = hcont
1383 elif l.startswith(
" ID="):
1384 while pos < nlines
and lines[pos].startswith(
" ID="):
1386 x.strip()
for x
in h_short_summ.search(lines[pos]).groups()
1388 cont[values[0]] = values
1391 raise RuntimeError(
"Cannot understand line %d: '%s'" % (pos, l))
1395 summ[d][
"header"] = header
1400 summ[name] = {
"header": header}
1406 Scan stdout to find ROOT TTree summaries and digest them.
1408 outlines = stdout.splitlines()
1409 nlines = len(outlines) - 1
1417 match = h_count_re.search(outlines[pos])
1418 while pos < nlines
and not match:
1420 match = h_count_re.search(outlines[pos])
1423 summaries.update(summ)
1429 Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1433 if "BINARY_TAG" in os.environ:
1434 arch = os.environ[
"BINARY_TAG"]
1435 elif "CMTCONFIG" in os.environ:
1436 arch = os.environ[
"CMTCONFIG"]
1437 elif "SCRAM_ARCH" in os.environ:
1438 arch = os.environ[
"SCRAM_ARCH"]
1439 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1445 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1457 Return True if the current platform is Windows.
1459 This function was needed because of the change in the CMTCONFIG format,
1460 from win32_vc71_dbg to i686-winxp-vc9-dbg.
1463 return "winxp" in platform
or platform.startswith(
"win")
1468 """Validate JSON output.
1469 returns -- A list of strings giving causes of failure."""
1473 with open(ref)
as f:
1474 expected = json.load(f)
1475 except json.JSONDecodeError
as err:
1476 causes.append(
"json parser error")
1477 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1482 causes.append(
"json content")
1483 result[
"json_diff"] =
"detailed diff was turned off"
1489 t.assertEqual(expected, out)
1490 except AssertionError
as err:
1491 causes.append(
"json content")
1492 result[
"json_diff"] = str(err).splitlines()[0]