from subprocess import Popen, PIPE, STDOUT
def sanitize_for_xml(data):
    '''
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    '''
    bad_chars = re.compile(
        u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')

    def quote(match):
        'helper function for the substitution'
        return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
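

# Illustrative usage sketch (not part of the original file): quoting a control
# character before embedding captured test output in an XML report.  The
# expected value follows from the '%2X' format used in sanitize_for_xml.
def _example_sanitize_for_xml():  # hypothetical helper, for illustration only
    assert sanitize_for_xml('summary \x1f done') == \
        'summary [NON-XML-CHAR-0x1F] done'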
def dumpProcs(name):
    '''helper to debug GAUDI-1084, dump the list of processes'''
    from getpass import getuser
    if 'WORKSPACE' in os.environ:
        p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
        with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
            f.write(p.communicate()[0])
def kill_tree(ppid, sig):
    '''
    Send a signal to a process and all its child processes (starting from the
    leaves).
    '''
    log = logging.getLogger('kill_tree')
    ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
    children = map(int, get_children.communicate()[0].split())
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug('killing process %d', ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != 3:  # No such process
            raise
        log.debug('no such process %d', ppid)
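

# Illustrative sketch (not part of the original file): after a timeout the
# whole process tree spawned for a test can be brought down.  'signal' is the
# standard library module; the 'sleep' child only stands in for a stuck job.
def _example_kill_tree():  # hypothetical helper, for illustration only
    import signal
    proc = Popen(['sleep', '60'])
    kill_tree(proc.pid, signal.SIGTERM)  # children are signalled before the parent
    proc.wait()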
        logging.debug('running test %s', self.name)
        if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
                     r'from\s+Configurables\s+import', self.options):
            optionFile = tempfile.NamedTemporaryFile(suffix='.py')
        else:
            optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
        optionFile.file.write(self.options)
        platform_id = (os.environ.get('BINARY_TAG')
                       or os.environ.get('CMTCONFIG')
                       or platform.platform())
            if re.search(prex, platform_id)

        workdir = tempfile.mkdtemp()

        elif "GAUDIEXE" in os.environ:
            prog = os.environ["GAUDIEXE"]

        dummy, prog_ext = os.path.splitext(prog)
        if prog_ext not in [".exe", ".py", ".bat"]:

        prog = which(prog) or prog

        args = map(RationalizePath, self.args)

        if prog_ext == ".py":
            'TIMEOUT_DETAIL': None

        self.result = validatorRes
        def target():
            logging.debug('executing %r in %s', params, workdir)
            self.proc = Popen(
                params, stdout=PIPE, stderr=PIPE, env=self.environment)
            logging.debug('(pid: %d)', self.proc.pid)
            self.out, self.err = self.proc.communicate()

        thread = threading.Thread(target=target)
        if thread.is_alive():
            logging.debug('time out in test %s (pid %d)', self.name,
                          self.proc.pid)
            # dump the stack trace of the stuck process
            cmd = ['gdb', '--pid',
                   str(self.proc.pid), '--batch',
                   '--eval-command=thread apply all backtrace']
            gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        if thread.is_alive():

            self.causes.append('timeout')

        logging.debug('completed test %s', self.name)
        logging.debug('returnedCode = %s', self.proc.returncode)

        logging.debug('validating test...')
        self.result, self.causes = self.ValidateOutput(
            stdout=self.out, stderr=self.err, result=validatorRes)

        shutil.rmtree(workdir, True)
        if self.signal is not None:

            self.causes.append('exit code')

            self.causes.append('exit code')

            self.causes.append("exit code")
        logging.debug('%s: %s', self.name, self.status)
        field_mapping = {
            'Exit Code': 'returnedCode',
            'Environment': 'environment',
            'Program Name': 'program',
            'Validator': 'validator',
            'Output Reference File': 'reference',
            'Error Reference File': 'error_reference',
            'Unsupported Platforms': 'unsupported_platforms',
            'Stack Trace': 'stack_trace'
        }
        resultDict = [(key, getattr(self, attr))
                      for key, attr in field_mapping.iteritems()
                      if getattr(self, attr)]
        resultDict.append(('Working Directory',
                           os.path.join(os.getcwd(), self.workdir)))
        resultDict.extend(self.result.annotations.iteritems())
        return dict(resultDict)
        elif stderr.strip() != self.stderr.strip():
            self.causes.append('standard error')
        return result, self.causes

    def findReferenceBlock(self, reference=None, stdout=None, result=None,
                           causes=None, signature_offset=0, signature=None,
                           id=None):
        """
        Given a block of text, tries to find it in the output. The block has
        to be identified by a signature line. By default the first line is
        used as signature, or the line pointed to by signature_offset. If
        signature_offset points outside the block, a signature line can be
        passed as the signature argument. Note: if 'signature' is None (the
        default), a negative signature_offset is interpreted as an index in a
        list (e.g. -1 means the last line), otherwise it is interpreted as the
        number of lines before the first line of the block at which the
        signature must appear. The parameter 'id' allows distinguishing
        between different calls to this function in the same validation code.
        """
        if reference is None:
        reflines = filter(None,
                          map(lambda s: s.rstrip(), reference.splitlines()))
        if not reflines:
            raise RuntimeError("Empty (or null) reference")
        outlines = filter(None,
                          map(lambda s: s.rstrip(), stdout.splitlines()))

        res_field = "GaudiTest.RefBlock"
        if id:
            res_field += "_%s" % id

        if signature is None:
            if signature_offset < 0:
                signature_offset = len(reference) + signature_offset
            signature = reflines[signature_offset]
        # find the reference block in the output
        try:
            pos = outlines.index(signature)
            outlines = outlines[pos - signature_offset:pos + len(reflines) -
                                signature_offset]
            if reflines != outlines:
                msg = "standard output"
                # avoid duplicated causes when the function is called twice
                if msg not in causes:
                    causes.append(msg)
                result[res_field + ".observed"] = result.Quote(
                    "\n".join(outlines))
        except ValueError:
            causes.append("missing signature")
        result[res_field + ".signature"] = result.Quote(signature)
        if len(reflines) > 1 or signature != reflines[0]:
            result[res_field + ".expected"] = result.Quote("\n".join(reflines))

        return causes
        """
        Count the number of messages with required severity (by default ERROR
        and FATAL) and check if their numbers match the expected ones (0 by
        default). The dictionary "expected" can be used to tune the number of
        errors and fatals allowed, or to limit the number of expected
        warnings, etc.
        """
        outlines = stdout.splitlines()
        from math import log10
        fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))

            if len(words) >= 2 and words[1] in errors:
                errors[words[1]].append(fmt % (linecount, l.rstrip()))

            if len(errors[e]) != expected[e]:
                causes.append('%s(%d)' % (e, len(errors[e])))
                result["GaudiTest.lines.%s" % e] = result.Quote('\n'.join(
                    errors[e]))
                result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
                    str(expected[e]))
    def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
                             trees_dict=None,
                             ignore=r"Basket|.*size|Compression"):
        """
        Compare the TTree summaries in stdout with the ones in trees_dict or
        in the reference file. By default ignore the size, compression and
        basket fields.
        The presence of TTree summaries when none is expected is not a failure.
        """
        if trees_dict is None:
            lreference = self._expandReferenceFileName(self.reference)
            if lreference and os.path.isfile(lreference):
                trees_dict = findTTreeSummaries(open(lreference).read())

        from pprint import PrettyPrinter
        pp = PrettyPrinter()
        if trees_dict:
            result["GaudiTest.TTrees.expected"] = result.Quote(
                pp.pformat(trees_dict))
            if ignore:
                result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)

        trees = findTTreeSummaries(stdout)
        failed = cmpTreesDicts(trees_dict, trees, ignore)
        if failed:
            causes.append("trees summaries")
            msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees,
                                                       failed)
            result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
            result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))

        return causes
    def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
                             dict=None, ignore=None):
        """
        Compare the histogram summaries in stdout with the ones in dict or in
        the reference file.
        The presence of histogram summaries when none is expected is not a
        failure.
        """
        if dict is None:
            lreference = self._expandReferenceFileName(self.reference)
            if lreference and os.path.isfile(lreference):
                dict = findHistosSummaries(open(lreference).read())

        from pprint import PrettyPrinter
        pp = PrettyPrinter()
        if dict:
            result["GaudiTest.Histos.expected"] = result.Quote(
                pp.pformat(dict))
            if ignore:
                result["GaudiTest.Histos.ignore"] = result.Quote(ignore)

        histos = findHistosSummaries(stdout)
        failed = cmpTreesDicts(dict, histos, ignore)
        if failed:
            causes.append("histos summaries")
            msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
            result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
            result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))

        return causes
    def validateWithReference(self, stdout=None, stderr=None, result=None,
                              causes=None, preproc=None):
        '''
        Default validation action: compare standard output and error to the
        reference files.
        '''
        if preproc is None:
            preproc = normalizeExamples

        # check standard output
        lreference = self._expandReferenceFileName(self.reference)
        if lreference and os.path.isfile(lreference):
            causes += ReferenceFileValidator(
                lreference, "standard output", "Output Diff",
                preproc=preproc)(stdout, result)
        else:
            causes += ["missing reference file"]

        if causes and lreference:
            # write a new reference file for stdout
            cnt = 0
            newrefname = '.'.join([lreference, 'new'])
            while os.path.exists(newrefname):
                cnt += 1
                newrefname = '.'.join([lreference, '~%d~' % cnt, 'new'])
            newref = open(newrefname, "w")
            # sanitize newlines
            for l in stdout.splitlines():
                newref.write(l.rstrip() + '\n')
            result['New Output Reference File'] = os.path.relpath(
                newrefname, self.basedir)

        # check standard error
        newcauses = []
        lreference = self._expandReferenceFileName(self.error_reference)
        if lreference:
            if os.path.isfile(lreference):
                newcauses = ReferenceFileValidator(
                    lreference, "standard error", "Error Diff",
                    preproc=preproc)(stderr, result)
            else:
                newcauses += ["missing error reference file"]
            causes += newcauses

            if newcauses and lreference:
                # write a new reference file for stderr
                cnt = 0
                newrefname = '.'.join([lreference, 'new'])
                while os.path.exists(newrefname):
                    cnt += 1
                    newrefname = '.'.join([lreference, '~%d~' % cnt, 'new'])
                newref = open(newrefname, "w")
                for l in stderr.splitlines():
                    newref.write(l.rstrip() + '\n')
                result['New Error Reference File'] = os.path.relpath(
                    newrefname, self.basedir)
        else:
            causes += BasicOutputValidator(
                lreference, "standard error",
                "ExecTest.expected_stderr")(stderr, result)

        return causes
    def _expandReferenceFileName(self, reffile):

        # helper to split a platform id into its constituent parts
        def platformSplit(p):
            delim = re.compile('-' in p and r"[-+]" or r"_")
            return set(delim.split(p))

        reference = os.path.normpath(
            os.path.join(self.basedir, os.path.expandvars(reffile)))

        # old-style platform-specific reference name
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:
            # look for platform-specific reference files of the form
            # <reference>.<platform fragments>
            dirname, basename = os.path.split(reference)
            head = basename + "."
            head_len = len(head)
            platform = platformSplit(GetPlatform(self))
            if 'do0' in platform:
                platform.add('dbg')
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:
                # take the most specific match
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
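
    # Illustrative sketch (not part of the original file): the nested
    # platformSplit helper breaks a platform id into its components, so that
    # e.g. "myoutput.ref.x86_64-dbg" is preferred over plain "myoutput.ref"
    # when the current platform is 'x86_64-slc6-gcc49-dbg':
    #
    #     platformSplit('x86_64-slc6-gcc49-dbg')  ->  {'x86_64', 'slc6', 'gcc49', 'dbg'}
    #     platformSplit('win32_vc71_dbg')         ->  {'win32', 'vc71', 'dbg'}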
from GaudiKernel import ROOT6WorkAroundEnabled
def RationalizePath(p):
    """
    Function used to normalize the used path
    """
    newPath = os.path.normpath(os.path.expandvars(p))
    if os.path.exists(newPath):
        p = os.path.realpath(newPath)
    return p
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe'
    suffix. If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.exists(executable):
            if executable.endswith('.exe'):
                if os.path.exists(executable[:-4]):
                    return executable[:-4]

        head, executable = os.path.split(executable)

    for d in os.environ.get("PATH").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.exists(fullpath):
            return fullpath
    if executable.endswith('.exe'):
        return which(executable[:-4])
    return None
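

# Illustrative sketch (not part of the original file): resolve the program to
# run before building the command line, falling back to the bare name when it
# is not on the PATH (the same "which(prog) or prog" idiom used above).
def _example_resolve_program(name='gaudirun.py'):  # hypothetical helper
    return which(name) or name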
    UNTESTED = 'UNTESTED'

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert type(key) in types.StringTypes
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert type(key) in types.StringTypes
        assert type(value) in types.StringTypes
        self.annotations[key] = value
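
    # Illustrative sketch (not part of the original file): validators annotate
    # the Result object with string keys and string values (enforced by the
    # asserts above); elsewhere in this file result.Quote(...) is used to make
    # multi-line output safe for the report, e.g.
    #
    #     result['GaudiTest.example.note'] = 'some short annotation'
    #     result['GaudiTest.example.block'] = result.Quote('\n'.join(lines))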
709 """Validate the output of the program. 710 'stdout' -- A string containing the data written to the standard output 712 'stderr' -- A string containing the data written to the standard error 714 'result' -- A 'Result' object. It may be used to annotate 715 the outcome according to the content of stderr. 716 returns -- A list of strings giving causes of failure.""" 721 causes.append(self.
cause)
727 """Compare 's1' and 's2', ignoring line endings. 730 returns -- True if 's1' and 's2' are the same, ignoring 731 differences in line endings.""" 735 to_ignore = re.compile(
736 r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*' 740 return not to_ignore.match(l)
742 return filter(keep_line, s1.splitlines()) == filter(
743 keep_line, s2.splitlines())
745 return s1.splitlines() == s2.splitlines()
750 """ Base class for a callable that takes a file and returns a modified 765 if hasattr(input,
"__iter__"):
769 lines = input.splitlines()
773 output =
'\n'.join(output)
            if line.find(s) >= 0:

        if self.start in line:

        elif self.end in line:
            when = re.compile(when)

    def __add__(self, rhs):
        if isinstance(rhs, RegexpReplacer):
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res

    def __processLine__(self, line):
        for w, o, r in self._operations:
            if w is None or w.search(line):
                line = o.sub(r, line)
        return line
851 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
852 "00:00:00 1970-01-01")
854 normalizeEOL.__processLine__ =
lambda line: str(line).rstrip() +
'\n' 858 skipEmptyLines.__processLine__ =
lambda line: (line.strip()
and line)
or None 872 line = line[:(pos + self.
siglen)]
873 lst = line[(pos + self.
siglen):].split()
875 line +=
" ".join(lst)
    '''
    Sort group of lines matching a regular expression
    '''

    def __init__(self, exp):
        self.exp = exp if hasattr(exp, 'match') else re.compile(exp)

    def __processFile__(self, lines):
        match = self.exp.match
normalizeExamples = maskPointers + normalizeDate
# (when, orig, repl) triplets folded into the chain as RegexpReplacer steps
for w, o, r in [
        ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),  # normalize timing output
        ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
        ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
        ("^JobOptionsSvc.*options successfully read in from",
         r"read in from .*[/\\]([^/\\]*)$",
         r"file \1"),  # normalize the path to the options file
        # normalize UUIDs, except those ending in all zeros (class IDs)
        (None,
         r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
         "00000000-0000-0000-0000-000000000000"),
        # collapse ServiceLocatorHelper::createService/locateService
        ("ServiceLocatorHelper::",
         "ServiceLocatorHelper::(create|locate)Service",
         "ServiceLocatorHelper::service"),
        # remove the leading 0 in Windows' exponential format
        (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
        # output line changed in Gaudi v24
        (None, r'Service reference count check:',
         r'Looping over all active services...'),
        # ignore the count of declared properties
        (None,
         r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
         r"\1NN"),
        ('ApplicationMgr', r'(declareMultiSvcType|addMultiSvc): ', ''),
]:
    normalizeExamples += RegexpReplacer(o, r, w)
936 "JobOptionsSvc INFO # ",
937 "JobOptionsSvc WARNING # ",
940 "This machine has a speed",
943 "ToolSvc.Sequenc... INFO",
944 "DataListenerSvc INFO XML written to file:",
947 "DEBUG No writable file catalog found which contains FID:",
948 "DEBUG Service base class initialized successfully",
950 "DEBUG Incident timing:",
954 "INFO 'CnvServices':[",
956 "DEBUG 'CnvServices':[",
962 r"^JobOptionsSvc INFO *$",
965 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
966 r"File '.*.xml' does not exist",
967 r"INFO Refer to dataset .* by its file ID:",
968 r"INFO Referring to dataset .* by its file ID:",
969 r"INFO Disconnect from dataset",
970 r"INFO Disconnected from dataset",
971 r"INFO Disconnected data IO:",
972 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
974 r"^StatusCodeSvc.*listing all unchecked return codes:",
975 r"^StatusCodeSvc\s*INFO\s*$",
976 r"Num\s*\|\s*Function\s*\|\s*Source Library",
979 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
981 r"^ +[0-9]+ \|.*ROOT",
982 r"^ +[0-9]+ \|.*\|.*Dict",
984 r"StatusCodeSvc.*all StatusCode instances where checked",
986 r"EventLoopMgr.*---> Loop Finished",
987 r"HiveSlimEventLo.*---> Loop Finished",
992 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
996 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
998 r"Property(.*)'AuditRe(start|initialize)':",
999 r"Property(.*)'IsIOBound':",
1001 r"Property(.*)'ErrorCount(er)?':",
1003 r"Property(.*)'Sequential':",
1005 r"Property(.*)'FilterCircularDependencies':",
1007 r"Property(.*)'IsClonable':",
1009 r"Property(.*)'PrintAlgsSequence':",
1011 r"Property update for OutputLevel : new value =",
1012 r"EventLoopMgr\s*DEBUG Creating OutputStream",
1019 r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',
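

# Illustrative sketch (not part of the original file): preprocessors are
# callables chained with '+'; they accept either a whole string or a list of
# lines (cf. FilePreprocessor.__call__ above).
def _example_preprocess(text):  # hypothetical helper, for illustration only
    preproc = maskPointers + normalizeDate + skipEmptyLines
    return preproc(text.splitlines())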
normalizeExamples = (
    lineSkipper + normalizeExamples + skipEmptyLines + normalizeEOL +
class ReferenceFileValidator:
    def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
        self.reffile = reffile
        self.cause = cause
        self.result_key = result_key
        self.preproc = preproc

    def __call__(self, stdout, result):
        causes = []
        if os.path.isfile(self.reffile):
            orig = open(self.reffile).xreadlines()
            if self.preproc:
                orig = self.preproc(orig)
                result[self.result_key + '.preproc.orig'] = \
                    result.Quote('\n'.join(map(str.strip, orig)))

        new = stdout.splitlines()
        if self.preproc:
            new = self.preproc(new)

        diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
        filterdiffs = map(lambda x: x.strip(),
                          filter(lambda x: x[0] != " ", diffs))
        if filterdiffs:
            result[self.result_key] = result.Quote("\n".join(filterdiffs))
            # legend for the +/- diff markers
            result[self.result_key] += result.Quote("""
Legend:
        -) reference file
        +) standard output of the test""")
            result[self.result_key + '.new'] = \
                result.Quote('\n'.join(map(str.strip, new)))
            causes.append(self.cause)
        return causes
def findTTreeSummaries(stdout):
    """Scan stdout to find ROOT TTree summaries and digest them."""
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}
    i = 0
    while i < nlines:
        # advance to the next block delimited by a line of '*'
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree
    return trees
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same
    value. If the value is a dict, the function is called recursively.
    to_check can contain more keys than reference; they will not be tested.
    The function returns at the first difference found.
    """
    fail_keys = []
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    for k in keys:
        if k in to_check:
            if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
                failed = fail_keys = cmpTreesDicts(reference[k], to_check[k],
                                                   ignore)
            else:
                failed = to_check[k] != reference[k]
        else:
            failed = True
        if failed:
            fail_keys.insert(0, k)
            break
    return fail_keys


def getCmpFailingValues(reference, to_check, fail_path):
    c = to_check
    r = reference
    for k in fail_path:
        c = c.get(k, None)
        r = r.get(k, None)
        if c is None or r is None:
            break  # one of the dictionaries is not deep enough
    return (fail_path, r, c)
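

# Illustrative sketch (not part of the original file): comparing a digested
# TTree summary against the expected one, ignoring volatile fields, and turning
# the first mismatch into a readable message.
def _example_compare_summaries(expected, found):  # hypothetical helper
    fail_keys = cmpTreesDicts(expected, found,
                              ignore=r"Basket|.*size|Compression")
    if fail_keys:
        path, ref_val, chk_val = getCmpFailingValues(expected, found, fail_keys)
        return "%s: expected %s, found %s" % (path, ref_val, chk_val)
    return None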
h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary of the digested information and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # the first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        return [f.strip() for f in l.strip("*\n").split(':', 2)]

    def parseblock(ll):
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
        r["Compression"] = float(sizes[-1])
        return r

    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i:i + 3])
        result["Branches"] = {}
        i += 4
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip the header of a group of branches
                i += 3
                continue
            branch = parseblock(lines[i:i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 4

    return (result, i)
def parseHistosSummary(lines, pos):
    """
    Extract the histogram info from the lines starting at pos.
    Returns the position of the first line after the summary block.
    """
    h_table_head = re.compile(
        r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
    )
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode the header line (matched by h_count_re)
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups()  # histogram type and directory
            t = t.replace(" profile", "Prof")
            if d not in summ:
                summ[d] = {}
            cont = summ[d][t] = {}
            pos += 1
            l = lines[pos]
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # one line per histogram format
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [x.strip()
                              for x in h_short_summ.search(lines[pos]).groups()]
                    cont[values[0]] = values
                    pos += 1
            else:  # cannot be interpreted
                raise RuntimeError(
                    "Cannot understand line %d: '%s'" % (pos, l))
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # if the table is not present, only the header is used
        summ[name] = {"header": header}
    return summ, pos
def findHistosSummaries(stdout):
    """Scan stdout to find ROOT histogram summaries and digest them."""
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}
    pos = 0
    while pos < nlines:
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
def PlatformIsNotSupported(self, context, result):
    platform = GetPlatform(self)
    unsupported = [
        re.compile(x) for x in [str(y).strip() for y in unsupported_platforms]
        if x
    ]
    for p_re in unsupported:
        if p_re.search(platform):
            result.SetOutcome(result.UNTESTED)
            result[result.CAUSE] = 'Platform not supported.'
            return True
    return False


def GetPlatform(self):
    """
    Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
    """
    if "BINARY_TAG" in os.environ:
        arch = os.environ["BINARY_TAG"]
    elif "CMTCONFIG" in os.environ:
        arch = os.environ["CMTCONFIG"]
    elif "SCRAM_ARCH" in os.environ:
        arch = os.environ["SCRAM_ARCH"]
    return arch


def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    platform = GetPlatform(self)
    return "winxp" in platform or platform.startswith("win")