24 from subprocess
import PIPE, STDOUT, Popen
25 from unittest
import TestCase
28 from html
import escape
as escape_for_html
30 from cgi
import escape
as escape_for_html
34 if sys.version_info < (3, 5):
37 from codecs
import backslashreplace_errors, register_error
40 if isinstance(exc, UnicodeDecodeError):
41 code =
hex(ord(exc.object[exc.start]))
42 return (
"\\" + code[1:], exc.start + 1)
44 return backslashreplace_errors(exc)
46 register_error(
"backslashreplace", _new_backslashreplace_errors)
48 del backslashreplace_errors
49 del _new_backslashreplace_errors
56 Take a string with invalid ASCII/UTF characters and quote them so that the
57 string can be used in an XML text.
59 >>> sanitize_for_xml('this is \x1b')
60 'this is [NON-XML-CHAR-0x1B]'
62 bad_chars = re.compile(
"[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")
66 return "".join(
"[NON-XML-CHAR-0x%2X]" % ord(c)
for c
in match.group())
68 return bad_chars.sub(quote, data)
72 """helper to debug GAUDI-1084, dump the list of processes"""
73 from getpass
import getuser
75 if "WORKSPACE" in os.environ:
76 p = Popen([
"ps",
"-fH",
"-U", getuser()], stdout=PIPE)
77 with open(os.path.join(os.environ[
"WORKSPACE"], name),
"wb")
as f:
78 f.write(p.communicate()[0])
83 Send a signal to a process and all its child processes (starting from the
86 log = logging.getLogger(
"kill_tree")
87 ps_cmd = [
"ps",
"--no-headers",
"-o",
"pid",
"--ppid", str(ppid)]
88 get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
89 children =
map(int, get_children.communicate()[0].split())
90 for child
in children:
93 log.debug(
"killing process %d", ppid)
95 except OSError
as err:
98 log.debug(
"no such process %d", ppid)
106 _common_tmpdir =
None
135 logging.debug(
"running test %s", self.
name)
146 "TIMEOUT_DETAIL":
None,
152 r"from\s+Gaudi.Configuration\s+import\s+\*|"
153 "from\s+Configurables\s+import",
156 suffix, lang =
".py",
"python"
158 suffix, lang =
".opts",
"c++"
159 self.
result[
"Options"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
160 lang, escape_for_html(self.
options)
162 optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
163 optionFile.file.write(self.
options.encode(
"utf-8"))
170 or platform.platform()
177 if re.search(prex, platform_id)
188 workdir = tempfile.mkdtemp()
199 prog_ext = os.path.splitext(prog)[1]
200 if prog_ext
not in [
".exe",
".py",
".bat"]:
204 prog =
which(prog)
or prog
206 args = list(
map(RationalizePath, self.
args))
208 if prog_ext ==
".py":
219 logging.debug(
"executing %r in %s", params, workdir)
221 params, stdout=PIPE, stderr=PIPE, env=self.
environment
223 logging.debug(
"(pid: %d)", self.
proc.pid)
224 out, err = self.
proc.communicate()
225 self.
out = out.decode(
"utf-8", errors=
"backslashreplace")
226 self.
err = err.decode(
"utf-8", errors=
"backslashreplace")
228 thread = threading.Thread(target=target)
233 if thread.is_alive():
234 logging.debug(
"time out in test %s (pid %d)", self.
name, self.
proc.pid)
241 "--eval-command=thread apply all backtrace",
243 gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
245 "utf-8", errors=
"backslashreplace"
250 if thread.is_alive():
252 self.
causes.append(
"timeout")
257 f
"completed test {self.name} with returncode = {self.returnedCode}"
259 logging.debug(
"validating test...")
264 logging.debug(f
"skipped test {self.name}")
269 shutil.rmtree(workdir,
True)
273 if self.
status !=
"skipped":
275 if self.
signal is not None:
277 self.
causes.append(
"exit code")
281 self.
causes.append(
"exit code")
284 self.
causes.append(
"exit code")
294 logging.debug(
"%s: %s", self.
name, self.
status)
296 "Exit Code":
"returnedCode",
299 "Runtime Environment":
"environment",
302 "Program Name":
"program",
304 "Validator":
"validator",
305 "Output Reference File":
"reference",
306 "Error Reference File":
"error_reference",
309 "Unsupported Platforms":
"unsupported_platforms",
310 "Stack Trace":
"stack_trace",
313 (key, getattr(self, attr))
314 for key, attr
in field_mapping.items()
315 if getattr(self, attr)
324 resultDict.extend(self.
result.annotations.items())
326 resultDict = dict(resultDict)
329 if "Validator" in resultDict:
330 resultDict[
"Validator"] =
'<code lang="{}"><pre>{}</pre></code>'.
format(
331 "python", escape_for_html(resultDict[
"Validator"])
342 elif stderr.strip() != self.
stderr.strip():
343 self.
causes.append(
"standard error")
344 return result, self.
causes
357 Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
360 if reference
is None:
369 reflines = list(filter(
None,
map(
lambda s: s.rstrip(), reference.splitlines())))
371 raise RuntimeError(
"Empty (or null) reference")
373 outlines = list(filter(
None,
map(
lambda s: s.rstrip(), stdout.splitlines())))
375 res_field =
"GaudiTest.RefBlock"
377 res_field +=
"_%s" % id
379 if signature
is None:
380 if signature_offset < 0:
381 signature_offset = len(reference) + signature_offset
382 signature = reflines[signature_offset]
385 pos = outlines.index(signature)
387 pos - signature_offset : pos + len(reflines) - signature_offset
389 if reflines != outlines:
390 msg =
"standard output"
393 if not msg
in causes:
395 result[res_field +
".observed"] = result.Quote(
"\n".join(outlines))
397 causes.append(
"missing signature")
398 result[res_field +
".signature"] = result.Quote(signature)
399 if len(reflines) > 1
or signature != reflines[0]:
400 result[res_field +
".expected"] = result.Quote(
"\n".join(reflines))
404 self, expected={"ERROR": 0,
"FATAL": 0}, stdout=
None, result=
None, causes=
None
407 Count the number of messages with required severity (by default ERROR and FATAL)
408 and check if their numbers match the expected ones (0 by default).
409 The dictionary "expected" can be used to tune the number of errors and fatals
410 allowed, or to limit the number of expected warnings etc.
425 outlines = stdout.splitlines()
426 from math
import log10
428 fmt =
"%%%dd - %%s" % (int(log10(len(outlines) + 1)))
434 if len(words) >= 2
and words[1]
in errors:
435 errors[words[1]].append(fmt % (linecount, l.rstrip()))
438 if len(errors[e]) != expected[e]:
439 causes.append(
"%s(%d)" % (e, len(errors[e])))
440 result[
"GaudiTest.lines.%s" % e] = result.Quote(
"\n".join(errors[e]))
441 result[
"GaudiTest.lines.%s.expected#" % e] = result.Quote(
453 ignore=r"Basket|.*size|Compression",
456 Compare the TTree summaries in stdout with the ones in trees_dict or in
457 the reference file. By default ignore the size, compression and basket
459 The presence of TTree summaries when none is expected is not a failure.
467 if trees_dict
is None:
470 if lreference
and os.path.isfile(lreference):
475 from pprint
import PrettyPrinter
479 result[
"GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
481 result[
"GaudiTest.TTrees.ignore"] = result.Quote(ignore)
486 causes.append(
"trees summaries")
488 result[
"GaudiTest.TTrees.failure_on"] = result.Quote(msg)
489 result[
"GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
494 self, stdout=None, result=None, causes=None, dict=None, ignore=None
497 Compare the TTree summaries in stdout with the ones in trees_dict or in
498 the reference file. By default ignore the size, compression and basket
500 The presence of TTree summaries when none is expected is not a failure.
512 if lreference
and os.path.isfile(lreference):
517 from pprint
import PrettyPrinter
521 result[
"GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
523 result[
"GaudiTest.Histos.ignore"] = result.Quote(ignore)
528 causes.append(
"histos summaries")
530 result[
"GaudiTest.Histos.failure_on"] = result.Quote(msg)
531 result[
"GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
536 self, stdout=None, stderr=None, result=None, causes=None, preproc=None
539 Default validation acti*on: compare standard output and error to the
554 preproc = normalizeExamples
558 if lreference
and os.path.isfile(lreference):
560 lreference,
"standard output",
"Output Diff", preproc=preproc
563 causes += [
"missing reference file"]
567 if causes
and lreference:
570 newrefname =
".".join([lreference,
"new"])
571 while os.path.exists(newrefname):
573 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
574 newref = open(newrefname,
"w")
576 for l
in stdout.splitlines():
577 newref.write(l.rstrip() +
"\n")
579 result[
"New Output Reference File"] = os.path.relpath(
591 if os.path.isfile(lreference):
593 lreference,
"standard error",
"Error Diff", preproc=preproc
596 newcauses = [
"missing error reference file"]
598 if newcauses
and lreference:
600 newrefname =
".".join([lreference,
"new"])
601 while os.path.exists(newrefname):
603 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
604 newref = open(newrefname,
"w")
606 for l
in stderr.splitlines():
607 newref.write(l.rstrip() +
"\n")
609 result[
"New Error Reference File"] = os.path.relpath(
614 lreference,
"standard error",
"ExecTest.expected_stderr"
627 JSON validation action: compare json file to reference file
635 if not os.path.isfile(output_file):
636 causes.append(f
"output file {output_file} does not exist")
640 with open(output_file)
as f:
641 output = json.load(f)
642 except json.JSONDecodeError
as err:
643 causes.append(
"json parser error")
644 result[
"output_parse_error"] = f
"json parser error in {output_file}: {err}"
649 causes.append(
"reference file not set")
650 elif not os.path.isfile(lreference):
651 causes.append(
"reference file does not exist")
654 if causes
and lreference:
657 newrefname =
".".join([lreference,
"new"])
658 while os.path.exists(newrefname):
660 newrefname =
".".join([lreference,
"~%d~" % cnt,
"new"])
661 with open(newrefname,
"w")
as newref:
662 json.dump(output, newref, indent=4)
663 result[
"New JSON Output Reference File"] = os.path.relpath(
678 def platformSplit(p):
681 delim = re.compile(
"-" in p
and r"[-+]" or r"_")
682 return set(delim.split(p))
684 reference = os.path.normpath(
685 os.path.join(self.
basedir, os.path.expandvars(reffile))
689 spec_ref = reference[:-3] +
GetPlatform(self)[0:3] + reference[-3:]
690 if os.path.isfile(spec_ref):
694 dirname, basename = os.path.split(reference)
697 head = basename +
"."
700 if "do0" in platform:
703 for f
in os.listdir(dirname):
704 if f.startswith(head):
705 req_plat = platformSplit(f[head_len:])
706 if platform.issuperset(req_plat):
707 candidates.append((len(req_plat), f))
712 reference = os.path.join(dirname, candidates[-1][1])
724 from GaudiKernel
import ROOT6WorkAroundEnabled
737 Function used to normalize the used path
739 newPath = os.path.normpath(os.path.expandvars(p))
740 if os.path.exists(newPath):
741 p = os.path.realpath(newPath)
747 Locates an executable in the executables path ($PATH) and returns the full
748 path to it. An application is looked for with or without the '.exe' suffix.
749 If the executable cannot be found, None is returned
751 if os.path.isabs(executable):
752 if not os.path.isfile(executable):
753 if executable.endswith(
".exe"):
754 if os.path.isfile(executable[:-4]):
755 return executable[:-4]
757 executable = os.path.split(executable)[1]
760 for d
in os.environ.get(
"PATH").split(os.pathsep):
761 fullpath = os.path.join(d, executable)
762 if os.path.isfile(fullpath):
764 elif executable.endswith(
".exe")
and os.path.isfile(fullpath[:-4]):
780 UNTESTED =
"UNTESTED"
790 def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
794 assert isinstance(key, six.string_types)
798 assert isinstance(key, six.string_types)
799 assert isinstance(value, six.string_types),
"{!r} is not a string".
format(value)
804 Convert text to html by escaping special chars and adding <pre> tags.
806 return "<pre>{}</pre>".
format(escape_for_html(text))
825 """Validate the output of the program.
826 'stdout' -- A string containing the data written to the standard output
828 'stderr' -- A string containing the data written to the standard error
830 'result' -- A 'Result' object. It may be used to annotate
831 the outcome according to the content of stderr.
832 returns -- A list of strings giving causes of failure."""
837 causes.append(self.
cause)
843 """Compare 's1' and 's2', ignoring line endings.
846 returns -- True if 's1' and 's2' are the same, ignoring
847 differences in line endings."""
851 to_ignore = re.compile(
852 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
856 return not to_ignore.match(l)
858 return list(filter(keep_line, s1.splitlines())) == list(
859 filter(keep_line, s2.splitlines())
862 return s1.splitlines() == s2.splitlines()
867 """Base class for a callable that takes a file and returns a modified
882 if not isinstance(input, six.string_types):
886 lines = input.splitlines()
890 output =
"\n".join(output)
920 if line.find(s) >= 0:
935 if self.
start in line:
938 elif self.
end in line:
948 when = re.compile(when)
952 if isinstance(rhs, RegexpReplacer):
954 res._operations = self.
_operations + rhs._operations
956 res = FilePreprocessor.__add__(self, rhs)
961 if w
is None or w.search(line):
962 line = o.sub(r, line)
969 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
970 "00:00:00 1970-01-01",
# Normalize line endings: strip any trailing whitespace/CR from the line and
# append exactly one "\n".
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"
# Map empty/whitespace-only lines to None, keep non-empty lines unchanged.
# NOTE(review): presumably the preprocessor machinery drops None results,
# removing the line from the output — confirm against FilePreprocessor.
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
991 line = line[: (pos + self.
siglen)]
992 lst = line[(pos + self.
siglen) :].split()
994 line +=
" ".join(lst)
1000 Sort group of lines matching a regular expression
1004 self.
exp = exp
if hasattr(exp,
"match")
else re.compile(exp)
1007 match = self.
exp.match
1016 output.extend(group)
# Base chain of output preprocessors (pointer masking + date normalization);
# this name is reassigned to an extended chain further below in the file.
normalizeExamples = maskPointers + normalizeDate
1026 (
"TIMER.TIMER",
r"\s+[+-]?[0-9]+[0-9.]*",
" 0"),
1027 (
"release all pending",
r"^.*/([^/]*:.*)",
r"\1"),
1028 (
"^#.*file",
r"file '.*[/\\]([^/\\]*)$",
r"file '\1"),
1030 "^JobOptionsSvc.*options successfully read in from",
1031 r"read in from .*[/\\]([^/\\]*)$",
1037 r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1038 "00000000-0000-0000-0000-000000000000",
1042 "ServiceLocatorHelper::",
1043 "ServiceLocatorHelper::(create|locate)Service",
1044 "ServiceLocatorHelper::service",
1047 (
None,
r"e([-+])0([0-9][0-9])",
r"e\1\2"),
1049 (
None,
r"Service reference count check:",
r"Looping over all active services..."),
1053 r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1056 (
"ApplicationMgr",
r"(declareMultiSvcType|addMultiSvc): ",
""),
1057 (
r"Property \['Name': Value\]",
r"( = '[^']+':)'(.*)'",
r"\1\2"),
1058 (
"TimelineSvc",
"to file 'TimelineFile':",
"to file "),
1059 (
"DataObjectHandleBase",
r'DataObjectHandleBase\("([^"]*)"\)',
r"'\1'"),
1066 "JobOptionsSvc INFO # ",
1067 "JobOptionsSvc WARNING # ",
1070 "This machine has a speed",
1073 "ToolSvc.Sequenc... INFO",
1074 "DataListenerSvc INFO XML written to file:",
1077 "DEBUG No writable file catalog found which contains FID:",
1078 "DEBUG Service base class initialized successfully",
1080 "DEBUG Incident timing:",
1084 "INFO 'CnvServices':[",
1086 "DEBUG 'CnvServices':[",
1091 "ServiceLocatorHelper::service: found service JobOptionsSvc",
1093 "mismatching case for property name:",
1095 "Histograms saving not required.",
1097 "Properties are dumped into",
1100 r"^JobOptionsSvc INFO *$",
1103 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1104 r"File '.*.xml' does not exist",
1105 r"INFO Refer to dataset .* by its file ID:",
1106 r"INFO Referring to dataset .* by its file ID:",
1107 r"INFO Disconnect from dataset",
1108 r"INFO Disconnected from dataset",
1109 r"INFO Disconnected data IO:",
1110 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1112 r".*StatusCodeSvc.*",
1113 r".*StatusCodeCheck.*",
1114 r"Num\s*\|\s*Function\s*\|\s*Source Library",
1117 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1119 r"^ +[0-9]+ \|.*ROOT",
1120 r"^ +[0-9]+ \|.*\|.*Dict",
1122 r"EventLoopMgr.*---> Loop Finished",
1123 r"HiveSlimEventLo.*---> Loop Finished",
1128 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
1132 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1133 r"Property(.*)'Audit(Begin|End)Run':",
1135 r"Property(.*)'AuditRe(start|initialize)':",
1136 r"Property(.*)'Blocking':",
1138 r"Property(.*)'ErrorCount(er)?':",
1140 r"Property(.*)'Sequential':",
1142 r"Property(.*)'FilterCircularDependencies':",
1144 r"Property(.*)'IsClonable':",
1146 r"Property update for OutputLevel : new value =",
1147 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1156 r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1160 normalizeExamples = (
1173 def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1181 if os.path.isfile(self.
reffile):
1182 orig = open(self.
reffile).readlines()
1185 result[self.
result_key +
".preproc.orig"] = result.Quote(
1186 "\n".join(
map(str.strip, orig))
1190 new = stdout.splitlines()
1194 diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
1196 map(
lambda x: x.strip(), filter(
lambda x: x[0] !=
" ", diffs))
1199 result[self.
result_key] = result.Quote(
"\n".join(filterdiffs))
1204 +) standard output of the test"""
1206 result[self.
result_key +
".preproc.new"] = result.Quote(
1207 "\n".join(
map(str.strip, new))
1209 causes.append(self.
cause)
1215 Scan stdout to find ROOT TTree summaries and digest them.
1217 stars = re.compile(
r"^\*+$")
1218 outlines = stdout.splitlines()
1219 nlines = len(outlines)
1225 while i < nlines
and not stars.match(outlines[i]):
1230 trees[tree[
"Name"]] = tree
1237 Check that all the keys in reference are in to_check too, with the same value.
1238 If the value is a dict, the function is called recursively. to_check can
1239 contain more keys than reference, that will not be tested.
1240 The function returns at the first difference found.
1245 ignore_re = re.compile(ignore)
1246 keys = [key
for key
in reference
if not ignore_re.match(key)]
1248 keys = reference.keys()
1252 if (
type(reference[k])
is dict)
and (
type(to_check[k])
is dict):
1255 failed = fail_keys =
cmpTreesDicts(reference[k], to_check[k], ignore)
1258 failed = to_check[k] != reference[k]
1263 fail_keys.insert(0, k)
1274 if c
is None or r
is None:
1276 return (fail_path, r, c)
# Matches the histogram-booking summary line of the service output.
# Groups: (1) line prefix before "SUCCESS" (the service name, stripped by the
# caller), (2) total number of booked histograms, (3) the per-type breakdown
# as whitespace-separated "key=value" tokens (split on "=" by the caller).
h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)"
)
1285 Parse the TTree summary table in lines, starting from pos.
1286 Returns a tuple with the dictionary with the digested informations and the
1287 position of the first line after the summary.
1294 return [f.strip()
for f
in l.strip(
"*\n").split(
":", 2)]
1298 cols = splitcols(ll[0])
1299 r[
"Name"], r[
"Title"] = cols[1:]
1301 cols = splitcols(ll[1])
1302 r[
"Entries"] = int(cols[1])
1304 sizes = cols[2].split()
1305 r[
"Total size"] = int(sizes[2])
1306 if sizes[-1] ==
"memory":
1309 r[
"File size"] = int(sizes[-1])
1311 cols = splitcols(ll[2])
1312 sizes = cols[2].split()
1313 if cols[0] ==
"Baskets":
1314 r[
"Baskets"] = int(cols[1])
1315 r[
"Basket size"] = int(sizes[2])
1316 r[
"Compression"] = float(sizes[-1])
1319 if i < (count - 3)
and lines[i].startswith(
"*Tree"):
1320 result = parseblock(lines[i : i + 3])
1321 result[
"Branches"] = {}
1323 while i < (count - 3)
and lines[i].startswith(
"*Br"):
1324 if i < (count - 2)
and lines[i].startswith(
"*Branch "):
1328 branch = parseblock(lines[i : i + 3])
1329 result[
"Branches"][branch[
"Name"]] = branch
1337 Extract the histograms infos from the lines starting at pos.
1338 Returns the position of the first line after the summary block.
1341 h_table_head = re.compile(
1342 r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1344 h_short_summ = re.compile(
r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1349 m = h_count_re.search(lines[pos])
1350 name = m.group(1).strip()
1351 total = int(m.group(2))
1353 for k, v
in [x.split(
"=")
for x
in m.group(3).split()]:
1356 header[
"Total"] = total
1360 m = h_table_head.search(lines[pos])
1363 t = t.replace(
" profile",
"Prof")
1370 if l.startswith(
" | ID"):
1372 titles = [x.strip()
for x
in l.split(
"|")][1:]
1374 while pos < nlines
and lines[pos].startswith(
" |"):
1376 values = [x.strip()
for x
in l.split(
"|")][1:]
1378 for i
in range(len(titles)):
1379 hcont[titles[i]] = values[i]
1380 cont[hcont[
"ID"]] = hcont
1382 elif l.startswith(
" ID="):
1383 while pos < nlines
and lines[pos].startswith(
" ID="):
1385 x.strip()
for x
in h_short_summ.search(lines[pos]).groups()
1387 cont[values[0]] = values
1390 raise RuntimeError(
"Cannot understand line %d: '%s'" % (pos, l))
1394 summ[d][
"header"] = header
1399 summ[name] = {
"header": header}
1405 Scan stdout to find ROOT TTree summaries and digest them.
1407 outlines = stdout.splitlines()
1408 nlines = len(outlines) - 1
1416 match = h_count_re.search(outlines[pos])
1417 while pos < nlines
and not match:
1419 match = h_count_re.search(outlines[pos])
1422 summaries.update(summ)
1428 Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1432 if "BINARY_TAG" in os.environ:
1433 arch = os.environ[
"BINARY_TAG"]
1434 elif "CMTCONFIG" in os.environ:
1435 arch = os.environ[
"CMTCONFIG"]
1436 elif "SCRAM_ARCH" in os.environ:
1437 arch = os.environ[
"SCRAM_ARCH"]
1438 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1444 elif os.environ.get(
"ENV_CMAKE_BUILD_TYPE",
"")
in (
1456 Return True if the current platform is Windows.
1458 This function was needed because of the change in the CMTCONFIG format,
1459 from win32_vc71_dbg to i686-winxp-vc9-dbg.
1462 return "winxp" in platform
or platform.startswith(
"win")
1467 """Validate JSON output.
1468 returns -- A list of strings giving causes of failure."""
1472 with open(ref)
as f:
1473 expected = json.load(f)
1474 except json.JSONDecodeError
as err:
1475 causes.append(
"json parser error")
1476 result[
"reference_parse_error"] = f
"json parser error in {ref}: {err}"
1481 causes.append(
"json content")
1482 result[
"json_diff"] =
"detailed diff was turned off"
1488 t.assertEqual(expected, out)
1489 except AssertionError
as err:
1490 causes.append(
"json content")
1491 result[
"json_diff"] = str(err).splitlines()[0]