from subprocess import Popen, PIPE, STDOUT

def sanitize_for_xml(data):
    '''
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    '''
    bad_chars = re.compile(
        u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')

    def quote(match):
        return ''.join('[NON-XML-CHAR-0x%2X]' % ord(c)
                       for c in match.group())

    return bad_chars.sub(quote, data)
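
# Hedged usage sketch: quote raw program output before embedding it in an XML
# report. The variable names and the XML snippet below are illustrative only,
# not part of this module.
raw_output = 'progress uses \x1b[1m escape codes'
xml_fragment = '<stdout>%s</stdout>' % sanitize_for_xml(raw_output)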

def dumpProcs(name):
    '''helper to debug GAUDI-1084, dump the list of processes'''
    from getpass import getuser
    if 'WORKSPACE' in os.environ:
        p = Popen(['ps', '-fH', '-U', getuser()], stdout=PIPE)
        with open(os.path.join(os.environ['WORKSPACE'], name), 'w') as f:
            f.write(p.communicate()[0])

def kill_tree(ppid, sig):
    '''
    Send a signal to a process and all its child processes (starting from the
    leaves).
    '''
    log = logging.getLogger('kill_tree')
    ps_cmd = ['ps', '--no-headers', '-o', 'pid', '--ppid', str(ppid)]
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE)
    children = map(int, get_children.communicate()[0].split())
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug('killing process %d', ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != 3:  # 3 == ESRCH: no such process
            raise
        log.debug('no such process %d', ppid)
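
# Hedged usage sketch: terminate a process tree politely, then escalate.
# 'sleep 600' only stands in for a stuck test process.
import signal
import time

proc = Popen(['sleep', '600'])
kill_tree(proc.pid, signal.SIGTERM)   # ask the whole tree to terminate
time.sleep(1)
if proc.poll() is None:               # still alive after the grace period?
    kill_tree(proc.pid, signal.SIGKILL)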

logging.debug('running test %s', self.name)

# new-style (Python) job options get a '.py' suffix, old-style text options
# get '.opts'
if re.search(r'from\s+Gaudi.Configuration\s+import\s+\*|'
             r'from\s+Configurables\s+import', self.options):
    optionFile = tempfile.NamedTemporaryFile(suffix='.py')
else:
    optionFile = tempfile.NamedTemporaryFile(suffix='.opts')
optionFile.file.write(self.options)

# merge the test-specific environment with the current one
self.environment = dict(
    self.environment.items() + os.environ.items())
platform_id = (os.environ.get('BINARY_TAG') or
               os.environ.get('CMTCONFIG') or
               platform.platform())
# skip the test if any of the unsupported-platform patterns matches
skip_test = bool([None
                  for prex in self.unsupported_platforms
                  if re.search(prex, platform_id)])

workdir = tempfile.mkdtemp()
136 elif "GAUDIEXE" in os.environ:
137 prog = os.environ[
"GAUDIEXE"]

dummy, prog_ext = os.path.splitext(prog)
if prog_ext not in [".exe", ".py", ".bat"]:
    prog += ".exe"
    prog_ext = ".exe"

prog = which(prog) or prog

args = map(RationalizePath, self.args)

# Python scripts are run through the interpreter, executables directly
if prog_ext == ".py":
    params = ['python', RationalizePath(prog)] + args
else:
    params = [RationalizePath(prog)] + args

validatorRes = Result({'CAUSE': None, 'EXCEPTION': None,
                       'RESOURCE': None, 'TARGET': None,
                       'TRACEBACK': None, 'START_TIME': None,
                       'END_TIME': None, 'TIMEOUT_DETAIL': None})
self.result = validatorRes

# run the program in a separate thread so that a timeout can be enforced
def target():
    logging.debug('executing %r in %s', params, workdir)
    self.proc = Popen(params, stdout=PIPE, stderr=PIPE,
                      env=self.environment)
    logging.debug('(pid: %d)', self.proc.pid)
    self.out, self.err = self.proc.communicate()

thread = threading.Thread(target=target)
thread.start()
thread.join(self.timeout)

if thread.is_alive():
    logging.debug('time out in test %s (pid %d)',
                  self.name, self.proc.pid)
    # dump a stack trace of the stuck process before killing it
    cmd = ['gdb', '--pid', str(self.proc.pid), '--batch',
           '--eval-command=thread apply all backtrace']
    gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    self.stack_trace = gdb.communicate()[0]

    kill_tree(self.proc.pid, signal.SIGTERM)
    thread.join(60)
    if thread.is_alive():
        kill_tree(self.proc.pid, signal.SIGKILL)
    self.causes.append('timeout')
else:
    logging.debug('completed test %s', self.name)

logging.debug('returnedCode = %s', self.proc.returncode)
self.returnedCode = self.proc.returncode

logging.debug('validating test...')

shutil.rmtree(workdir, True)

# exit-code checks: every unmet expectation is recorded with the same cause
# (the conditions of the last two branches are elided in this excerpt)
if self.signal is not None:
    if int(self.returnedCode) != -int(self.signal):
        self.causes.append('exit code')

elif ...:
    if ...:
        self.causes.append('exit code')

elif ...:
    self.causes.append("exit code")

logging.debug('%s: %s', self.name, self.status)

# mapping from report field names to the corresponding test attributes
field_mapping = {'Exit Code': 'returnedCode',
                 'Environment': 'environment',
                 'Program Name': 'program',
                 'Validator': 'validator',
                 'Output Reference File': 'reference',
                 'Error Reference File': 'error_reference',
                 'Unsupported Platforms': 'unsupported_platforms',
                 'Stack Trace': 'stack_trace'}
resultDict = [(key, getattr(self, attr))
              for key, attr in field_mapping.iteritems()
              if getattr(self, attr)]
resultDict.append(('Working Directory',
                   RationalizePath(os.path.join(os.getcwd(),
                                                self.workdir))))
resultDict.extend(self.result.annotations.iteritems())

return dict(resultDict)

elif stderr.strip() != self.stderr.strip():
    self.causes.append('standard error')
return result, self.causes


def findReferenceBlock(self, reference=None, stdout=None, result=None,
                       causes=None, signature_offset=0, signature=None,
                       id=None):
    """
    Given a block of text, try to find it in the output. The block has to be
    identified by a signature line. By default the first line is used as the
    signature, or the line pointed to by signature_offset. If signature_offset
    points outside the block, a signature line can be passed as the signature
    argument. Note: if 'signature' is None (the default), a negative
    signature_offset is interpreted as an index in a list (e.g. -1 means the
    last line); otherwise it is interpreted as the number of lines before the
    first one of the block at which the signature must appear. The parameter
    'id' allows one to distinguish between different calls to this function in
    the same validation code.
    """
    if reference is None:
        reference = self.reference

    reflines = filter(
        None, map(lambda s: s.rstrip(), reference.splitlines()))
    if not reflines:
        raise RuntimeError("Empty (or null) reference")
    # do the same for the actual standard output
    outlines = filter(
        None, map(lambda s: s.rstrip(), stdout.splitlines()))

    res_field = "GaudiTest.RefBlock"
    if id:
        res_field += "_%s" % id

    if signature is None:
        if signature_offset < 0:
            signature_offset = len(reference) + signature_offset
        signature = reflines[signature_offset]
    # look for the signature in the output and extract the candidate block
    try:
        pos = outlines.index(signature)
        outlines = outlines[pos - signature_offset:pos +
                            len(reflines) - signature_offset]
        if reflines != outlines:
            msg = "standard output"
            # avoid duplicated causes when the function is called twice
            if msg not in causes:
                causes.append(msg)
            result[res_field +
                   ".observed"] = result.Quote("\n".join(outlines))
    except ValueError:
        causes.append("missing signature")
    result[res_field + ".signature"] = result.Quote(signature)
    if len(reflines) > 1 or signature != reflines[0]:
        result[res_field +
               ".expected"] = result.Quote("\n".join(reflines))

def countErrorLines(self, expected={'ERROR': 0, 'FATAL': 0}, stdout=None,
                    result=None, causes=None):
    """
    Count the number of messages with the required severity (by default ERROR
    and FATAL) and check whether their numbers match the expected ones (0 by
    default). The dictionary "expected" can be used to tune the number of
    errors and fatals allowed, or to limit the number of expected warnings
    etc.
    """
    # one list of offending lines per requested severity
    errors = dict((sev, []) for sev in expected)

    outlines = stdout.splitlines()
    from math import log10
    # format used to report a line together with its (right-aligned) number
    fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))

    linecount = 0
    for l in outlines:
        linecount += 1
        words = l.split()
        if len(words) >= 2 and words[1] in errors:
            errors[words[1]].append(fmt % (linecount, l.rstrip()))

    for e in errors:
        if len(errors[e]) != expected[e]:
            causes.append('%s(%d)' % (e, len(errors[e])))
            result["GaudiTest.lines.%s" %
                   e] = result.Quote('\n'.join(errors[e]))
            result["GaudiTest.lines.%s.expected#" %
                   e] = result.Quote(str(expected[e]))
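
# Hedged example of a validator call: tolerate exactly one ERROR message but
# still forbid FATAL ones (again assuming the harness exposes the method to
# validator code).
countErrorLines(expected={'ERROR': 1, 'FATAL': 0})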

def CheckTTreesSummaries(self, stdout=None, result=None, causes=None,
                         trees_dict=None,
                         ignore=r"Basket|.*size|Compression"):
    """
    Compare the TTree summaries in stdout with the ones in trees_dict or in
    the reference file. By default ignore the size, compression and basket
    fields.
    The presence of TTree summaries when none is expected is not a failure.
    """
    if trees_dict is None:
        lreference = self._expandReferenceFileName(self.reference)
        # take the expected summaries from the reference file, if any
        if lreference and os.path.isfile(lreference):
            trees_dict = findTTreeSummaries(open(lreference).read())
        else:
            trees_dict = {}

    from pprint import PrettyPrinter
    pp = PrettyPrinter()
    if trees_dict:
        result["GaudiTest.TTrees.expected"] = result.Quote(
            pp.pformat(trees_dict))
        if ignore:
            result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)

    trees = findTTreeSummaries(stdout)
    failed = cmpTreesDicts(trees_dict, trees, ignore)
    if failed:
        causes.append("trees summaries")
        msg = "%s: %s != %s" % getCmpFailingValues(
            trees_dict, trees, failed)
        result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
        result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))

def CheckHistosSummaries(self, stdout=None, result=None, causes=None,
                         dict=None, ignore=None):
    """
    Compare the histogram summaries in stdout with the ones in dict or in
    the reference file. By default ignore the size, compression and basket
    fields. The presence of histogram summaries when none is expected is
    not a failure.
    """
    if dict is None:
        lreference = self._expandReferenceFileName(self.reference)
        if lreference and os.path.isfile(lreference):
            dict = findHistosSummaries(open(lreference).read())
        else:
            dict = {}
    from pprint import PrettyPrinter
    pp = PrettyPrinter()
    if dict:
        result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
        if ignore:
            result["GaudiTest.Histos.ignore"] = result.Quote(ignore)

    histos = findHistosSummaries(stdout)
    failed = cmpTreesDicts(dict, histos, ignore)
    if failed:
        causes.append("histos summaries")
        msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
        result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
        result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))

def validateWithReference(self, stdout=None, stderr=None, result=None,
                          causes=None, preproc=None):
    '''
    Default validation action: compare standard output and error to the
    reference files.
    '''
    if preproc is None:
        preproc = normalizeExamples

    # compare the standard output with its reference file
    lreference = self._expandReferenceFileName(self.reference)
    if lreference and os.path.isfile(lreference):
        causes += ReferenceFileValidator(lreference,
                                         "standard output",
                                         "Output Diff",
                                         preproc=preproc)(stdout, result)
    if causes:
        # the comparison failed: save the observed output as a candidate
        # new reference file next to the old one
        newref = open(lreference + ".new", "w")
        for l in stdout.splitlines():
            newref.write(l.rstrip() + '\n')
        del newref  # flush and close

    # compare the standard error with its reference file, if any
    lreference = self._expandReferenceFileName(self.error_reference)
    if lreference and os.path.isfile(lreference):
        newcauses = ReferenceFileValidator(lreference,
                                           "standard error",
                                           "Error Diff",
                                           preproc=preproc)(stderr, result)
        causes += newcauses
        if newcauses:
            newref = open(lreference + ".new", "w")
            for l in stderr.splitlines():
                newref.write(l.rstrip() + '\n')
            del newref  # flush and close
    else:
        causes += BasicOutputValidator(
            self.stderr, "standard error",
            "ExecTest.expected_stderr")(stderr, result)

def _expandReferenceFileName(self, reffile):
    if not reffile:
        return ""

    # helper to split a platform string into its dash- or
    # underscore-separated components
    def platformSplit(p):
        return set(p.split('-' in p and '-' or '_'))

    reference = os.path.normpath(os.path.join(self.basedir,
                                              os.path.expandvars(reffile)))

    # old-style platform-specific reference name
    spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
    if os.path.isfile(spec_ref):
        reference = spec_ref
    else:
        # look for new-style platform-specific reference files, i.e. files
        # whose name starts with the reference file name plus a dot
        dirname, basename = os.path.split(reference)
        if not dirname:
            dirname = '.'
        head = basename + "."
        head_len = len(head)
        platform = platformSplit(GetPlatform(self))
        if 'do0' in platform:
            platform.add('dbg')
        candidates = []
        for f in os.listdir(dirname):
            if f.startswith(head):
                req_plat = platformSplit(f[head_len:])
                if platform.issuperset(req_plat):
                    candidates.append((len(req_plat), f))
        if candidates:
            # take the file requiring the most specific platform match
            candidates.sort()
            reference = os.path.join(dirname, candidates[-1][1])
    return reference
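
# Illustration of the platform matching used above (the platform string is an
# invented example): platformSplit breaks a platform id into its tags, and a
# reference file suffix is accepted when all of its tags are present.
#
#   >>> platformSplit('x86_64-centos7-gcc8-opt')
#   set(['x86_64', 'centos7', 'gcc8', 'opt'])
#
# e.g. 'myTest.ref.x86_64-opt' would then be a candidate for that platform,
# while 'myTest.ref.i686-dbg' would not.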

from GaudiKernel import ROOT6WorkAroundEnabled


def RationalizePath(p):
    '''
    Function used to normalize a path: environment variables are expanded
    and, if the result exists, symlinks are resolved.
    '''
    newPath = os.path.normpath(os.path.expandvars(p))
    if os.path.exists(newPath):
        p = os.path.realpath(newPath)
    return p

def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the
    full path to it. An application is looked for with or without the '.exe'
    suffix. If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.exists(executable):
            if executable.endswith('.exe'):
                if os.path.exists(executable[:-4]):
                    return executable[:-4]
        else:
            return executable
        head, executable = os.path.split(executable)
    for d in os.environ.get("PATH").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.exists(fullpath):
            return fullpath
    if executable.endswith('.exe'):
        return which(executable[:-4])
    return None
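
# Usage sketch (the executable name is only an example): get the full path of
# a program on $PATH, falling back to the bare name if it cannot be found.
prog = which('Gaudi.exe') or 'Gaudi.exe'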

class Result:

    PASS = 'PASS'
    UNTESTED = 'UNTESTED'

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert type(key) in types.StringTypes
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert type(key) in types.StringTypes
        assert type(value) in types.StringTypes
        self.annotations[key] = value
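
# Hedged sketch: a Result object behaves like a dictionary of string
# annotations (the key and value below are made up).
r = Result(outcome=Result.PASS)
r['GaudiTest.example'] = 'some annotation text'
assert r['GaudiTest.example'] == 'some annotation text'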
649 """Validate the output of the program. 650 'stdout' -- A string containing the data written to the standard output 652 'stderr' -- A string containing the data written to the standard error 654 'result' -- A 'Result' object. It may be used to annotate 655 the outcome according to the content of stderr. 656 returns -- A list of strings giving causes of failure.""" 661 causes.append(self.
cause)
667 """Compare 's1' and 's2', ignoring line endings. 670 returns -- True if 's1' and 's2' are the same, ignoring 671 differences in line endings.""" 675 to_ignore = re.compile(
676 r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*')
678 def keep_line(l):
return not to_ignore.match(l)
679 return filter(keep_line, s1.splitlines()) == filter(keep_line, s2.splitlines())
681 return s1.splitlines() == s2.splitlines()
687 """ Base class for a callable that takes a file and returns a modified 702 if hasattr(input,
"__iter__"):
706 lines = input.splitlines()
710 output =
'\n'.join(output)
        if line.find(s) >= 0:
            return None
        if self.start in line:
            self._skipping = True
            return None
        elif self.end in line:
            self._skipping = False

def __init__(self, orig, repl="", when=None):
    if when:
        when = re.compile(when)
    self._operations = [(when, re.compile(orig), repl)]

def __add__(self, rhs):
    if isinstance(rhs, RegexpReplacer):
        res = RegexpReplacer("", "", None)
        res._operations = self._operations + rhs._operations
    else:
        res = FilePreprocessor.__add__(self, rhs)
    return res

def __processLine__(self, line):
    for w, o, r in self._operations:
        if w is None or w.search(line):
            line = o.sub(r, line)
    return line
792 "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
793 "00:00:00 1970-01-01")

normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + '\n'

skipEmptyLines = FilePreprocessor()
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
or None 814 line = line[:(pos + self.
siglen)]
815 lst = line[(pos + self.
siglen):].split()
817 line +=
" ".join(lst)

class SortGroupOfLines(FilePreprocessor):
    '''
    Sort groups of lines matching a regular expression.
    '''

    def __init__(self, exp):
        self.exp = exp if hasattr(exp, 'match') else re.compile(exp)

    def __processFile__(self, lines):
        match = self.exp.match

normalizeExamples = maskPointers + normalizeDate
for w, o, r in [
    # normalize timing output
    ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    ("^JobOptionsSvc.*options successfully read in from",
     r"read in from .*[/\\]([^/\\]*)$", r"file \1"),
    # normalize UUIDs, except the ones ending in twelve zeros (class IDs)
    (None,
     r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
     "00000000-0000-0000-0000-000000000000"),
    # absorb a change in ServiceLocatorHelper
    ("ServiceLocatorHelper::",
     "ServiceLocatorHelper::(create|locate)Service",
     "ServiceLocatorHelper::service"),
    # remove the leading 0 from exponents (Windows vs. Linux formatting)
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # output line changed in Gaudi v24
    (None, r'Service reference count check:',
     r'Looping over all active services...'),
    # ignore the count of declared properties
    (None,
     r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
     r"\1NN"),
]:
    normalizeExamples += RegexpReplacer(o, r, w)
873 "JobOptionsSvc INFO # ",
874 "JobOptionsSvc WARNING # ",
877 "This machine has a speed",
880 "ToolSvc.Sequenc... INFO",
881 "DataListenerSvc INFO XML written to file:",
882 "[INFO]",
"[WARNING]",
883 "DEBUG No writable file catalog found which contains FID:",
884 "DEBUG Service base class initialized successfully",
886 "DEBUG Incident timing:",
890 "INFO 'CnvServices':[",
892 "DEBUG 'CnvServices':[",
897 r"^JobOptionsSvc INFO *$",
900 r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
901 r"File '.*.xml' does not exist",
902 r"INFO Refer to dataset .* by its file ID:",
903 r"INFO Referring to dataset .* by its file ID:",
904 r"INFO Disconnect from dataset",
905 r"INFO Disconnected from dataset",
906 r"INFO Disconnected data IO:",
907 r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
909 r"^StatusCodeSvc.*listing all unchecked return codes:",
910 r"^StatusCodeSvc\s*INFO\s*$",
911 r"Num\s*\|\s*Function\s*\|\s*Source Library",
914 r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
916 r"^ +[0-9]+ \|.*ROOT",
917 r"^ +[0-9]+ \|.*\|.*Dict",
919 r"StatusCodeSvc.*all StatusCode instances where checked",
921 r"EventLoopMgr.*---> Loop Finished",
922 r"HiveSlimEventLo.*---> Loop Finished",
927 r"SUCCESS\s*Booked \d+ Histogram\(s\)",
931 r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
933 r"Property(.*)'AuditRe(start|initialize)':",
934 r"Property(.*)'IsIOBound':",
936 r"Property(.*)'ErrorCount(er)?':",
938 r"Property(.*)'Sequential':",
940 r"Property(.*)'FilterCircularDependencies':",
942 r"Property(.*)'IsClonable':",
944 r"Property update for OutputLevel : new value =",
945 r"EventLoopMgr\s*DEBUG Creating OutputStream",
952 r'Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*',

normalizeExamples = (lineSkipper + normalizeExamples + skipEmptyLines +
                     normalizeEOL + LineSorter("Services to release : ") +
                     SortGroupOfLines(r'.*\s+(DEBUG|SUCCESS) Property \[\'Name\':.*'))

def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
    self.reffile = os.path.expandvars(reffile)
    self.cause = cause
    self.result_key = result_key
    self.preproc = preproc

def __call__(self, stdout, result):
    causes = []
    if os.path.isfile(self.reffile):
        orig = open(self.reffile).xreadlines()
        if self.preproc:
            orig = self.preproc(orig)
        result[self.result_key + '.preproc.orig'] = \
            result.Quote('\n'.join(map(str.strip, orig)))
    else:
        orig = []

    new = stdout.splitlines()
    if self.preproc:
        new = self.preproc(new)

    diffs = difflib.ndiff(orig, new, charjunk=difflib.IS_CHARACTER_JUNK)
    filterdiffs = map(lambda x: x.strip(), filter(
        lambda x: x[0] != " ", diffs))
    if filterdiffs:
        result[self.result_key] = result.Quote("\n".join(filterdiffs))
        result[self.result_key] += result.Quote("""
Legend:
        -) reference file
        +) standard output of the test""")
        result[self.result_key + '.preproc.new'] = \
            result.Quote('\n'.join(map(str.strip, new)))
        causes.append(self.cause)
    return causes

def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.
    """
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:
        # skip lines until the next summary block (a row of '*')
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees

def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same
    value. If the value is a dict, the function is called recursively.
    to_check can contain more keys than reference; they will not be tested.
    The function returns at the first difference found.
    """
    fail_keys = []
    # filter the keys in the reference dictionary
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    # loop over the (non-ignored) keys of the reference dictionary
    for k in keys:
        if k in to_check:
            if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
                # recurse when both values are dictionaries
                failed = fail_keys = cmpTreesDicts(
                    reference[k], to_check[k], ignore)
            else:
                failed = to_check[k] != reference[k]
        else:  # a missing key is a failure
            failed = True
        if failed:
            fail_keys.insert(0, k)
            break  # stop at the first difference
    return fail_keys  # list of keys leading to the differing value


def getCmpFailingValues(reference, to_check, fail_path):
    c = to_check
    r = reference
    for k in fail_path:
        c = c.get(k, None)
        r = r.get(k, None)
        if c is None or r is None:
            break
    return (fail_path, r, c)
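
# Self-contained illustration (with made-up dictionaries) of how the two
# helpers report the first difference between expected and found summaries.
ref = {'Events': {'Entries': 100, 'Branches': {'tracks': {'Entries': 100}}}}
got = {'Events': {'Entries': 100, 'Branches': {'tracks': {'Entries': 99}}}}
failed = cmpTreesDicts(ref, got, ignore=r"Basket|.*size|Compression")
# failed is now ['Events', 'Branches', 'tracks', 'Entries']
print "%s: %s != %s" % getCmpFailingValues(ref, got, failed)
# -> ['Events', 'Branches', 'tracks', 'Entries']: 100 != 99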

h_count_re = re.compile(
    r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")

def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary of the digested information and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # the first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        return [f.strip() for f in l.strip("*\n").split(':', 2)]

    def parseblock(ll):
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
        r["Compression"] = float(sizes[-1])
        return r

    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i:i + 3])
        result["Branches"] = {}
        i += 4
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip the branch-header block
                i += 3
                continue
            branch = parseblock(lines[i:i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 4

    return (result, i)

def parseHistosSummary(lines, pos):
    """
    Extract the histogram info from the lines starting at pos.
    Returns the position of the first line after the summary block.
    """
    h_table_head = re.compile(
        r'SUCCESS\s+List of booked (1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"')
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode the header line
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups()  # histogram type and directory
            t = t.replace(" profile", "Prof")
            cont = {}
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            pos += 1
            l = lines[pos]
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # short summary format
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [x.strip()
                              for x in h_short_summ.search(lines[pos]).groups()]
                    cont[values[0]] = values
                    pos += 1
            else:
                raise RuntimeError(
                    "Cannot understand line %d: '%s'" % (pos, l))
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # if the table is not present, only the header is kept
        summ[name] = {"header": header}
    return summ, pos

def findHistosSummaries(stdout):
    """
    Scan stdout to find ROOT histogram summaries and digest them.
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}

    pos = 0
    while pos < nlines:
        summ = {}
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries

def PlatformIsNotSupported(self, context, result):
    platform = GetPlatform(self)
    unsupported = [re.compile(x)
                   for x in [str(y).strip() for y in unsupported_platforms]
                   if x]
    for p_re in unsupported:
        if p_re.search(platform):
            result.SetOutcome(result.UNTESTED)
            result[result.CAUSE] = 'Platform not supported.'
            return True
    return False


def GetPlatform(self):
    """
    Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
    """
    arch = "None"
    # check the architecture-defining environment variables in order
    if "BINARY_TAG" in os.environ:
        arch = os.environ["BINARY_TAG"]
    elif "CMTCONFIG" in os.environ:
        arch = os.environ["CMTCONFIG"]
    elif "SCRAM_ARCH" in os.environ:
        arch = os.environ["SCRAM_ARCH"]
    return arch

def PlatformIsWindoze(self):
    '''
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    '''
    platform = GetPlatform(self).lower()
    return "winxp" in platform or platform.startswith("win")
def PlatformIsNotSupported(self, context, result)
def __processLine__(self, line)
def __init__(self, start, end)
def __call__(self, input)
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
def __processLine__(self, line)
def cmpTreesDicts(reference, to_check, ignore=None)
def __processFile__(self, lines)
def ValidateOutput(self, stdout, stderr, result)
def read(f, regex='.*', skipevents=0)
def __processLine__(self, line)
def __processFile__(self, lines)
def __call__(self, out, result)
def findHistosSummaries(stdout)
def _parseTTreeSummary(lines, pos)
def __call__(self, stdout, result)
def __processLine__(self, line)
def __init__(self, orig, repl="", when=None)
def __init__(self, signature)
def __call__(self, input)
def sanitize_for_xml(data)
def getCmpFailingValues(reference, to_check, fail_path)
def __init__(self, members=[])
def __init__(self, strings=[], regexps=[])
def __setitem__(self, key, value)
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
def __processLine__(self, line)
def parseHistosSummary(lines, pos)
def _expandReferenceFileName(self, reffile)
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
def __CompareText(self, s1, s2)
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
def __getitem__(self, key)
def findTTreeSummaries(stdout)
def __init__(self, ref, cause, result_key)
def ROOT6WorkAroundEnabled(id=None)
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")