# The Gaudi Framework - master (181af51f)
# GaudiExeTest.py
11import difflib
12import inspect
13import json
14import os
15import re
16import time
17from pathlib import Path
18from textwrap import dedent
19from typing import Callable, Dict, List
20
21import pytest
22
23from GaudiTesting.preprocessors import normalizeTestSuite
24from GaudiTesting.SubprocessBaseTest import SubprocessBaseTest
25from GaudiTesting.utils import (
26 CodeWrapper,
27 filter_dict,
28 find_histos_summaries,
29 find_ttree_summaries,
30)
31
# Default message-count expectation used when bootstrapping a reference:
# a clean job is assumed to emit no ERROR and no FATAL messages.
NO_ERROR_MESSAGES = {"ERROR": 0, "FATAL": 0}
33
class GaudiExeTest(SubprocessBaseTest):
    """
    An extension of SubprocessBaseTest tailored to the Gaudi/LHCb workflow.
    It includes additional functionalities for handling options,
    preprocessing output, and validating against platform-specific reference files.
    """

    # Job options: a callable (written out as Python), a dict (written out as
    # JSON) or a string (written out as .opts); None means no options file.
    options: Callable = None
    # Filled by _prepare_command with a CodeWrapper of the options actually
    # written to disk, so test_record_options can attach it to the report.
    options_code = None
    # Preprocessing applied to stdout/stderr before reference comparisons.
    preprocessor: Callable = normalizeTestSuite
45
46 @classmethod
47 def _prepare_command(cls, tmp_path=Path()) -> List[str]:
48 """
49 Override the base class to include options.
50 """
51 command = super()._prepare_command(tmp_path=tmp_path)
52
53 def generate_unique_options_filename(extension, directory):
54 timestamp = time.strftime("%Y%m%d_%H%M%S")
55 return directory / f"options_{timestamp}.{extension}"
56
57 if hasattr(cls, "options") and cls.options is not None:
58 options = cls.options
59 filename = None
60
61 # Check if options is a function
62 if callable(options):
63 source_lines = inspect.getsource(options).splitlines()
64 clean_source = dedent(
65 "\n".join(source_lines[1:])
66 ) # Skip the first line (def options():)
67 filename = generate_unique_options_filename("py", tmp_path)
68 with open(filename, "w") as file:
69 file.write(clean_source)
70 cls.options_code = CodeWrapper(clean_source, "python")
71
72 # Check if options is a dictionary
73 elif isinstance(options, dict):
74 filename = generate_unique_options_filename("json", tmp_path)
75 with open(filename, "w") as file:
76 json.dump(options, file, indent=4)
77 cls.options_code = CodeWrapper(options, "json")
78
79 # Check if options is a string
80 elif isinstance(options, str):
81 options = dedent(options)
82 filename = generate_unique_options_filename("opts", tmp_path)
83 with open(filename, "w") as file:
84 file.write(options)
85 cls.options_code = CodeWrapper(options, "cpp")
86
87 else:
88 raise ValueError(f"invalid options type '{type(options).__name__}'")
89
90 if filename:
91 command.append(str(filename))
92 return command
93
94 @staticmethod
96 reference_data: dict,
97 output: str,
98 preprocessor: Callable[[str], str] = lambda x: x,
99 ) -> str:
100 """
101 Compute the difference between the reference data and the current output.
102 """
103 expected_output = (
104 reference_data.splitlines()
105 if hasattr(reference_data, "splitlines")
106 else reference_data
107 )
108 actual_output = preprocessor(output).splitlines()
109 return "\n".join(
110 difflib.unified_diff(
111 expected_output,
112 actual_output,
113 fromfile="expected",
114 tofile="actual",
115 lineterm="",
116 )
117 )
118
119 @classmethod
121 cls,
122 data: bytes,
123 key: str,
124 reference: Dict,
125 record_property: Callable[[str, str], None],
126 ) -> None:
127 """
128 Validate the given data against a reference file for the specified key.
129 """
130 if cls.reference:
131 try:
132 if key in reference:
133 # If the data is less than 100k, we can compare it directly
134 # and let pytest generate a nice diff, otherwise we hide the values
135 # to pytest to avoid that the test (practically) hangs.
136 #
137 # See
138 # - https://gitlab.cern.ch/lhcb/LHCb/-/issues/252
139 # - https://gitlab.cern.ch/gaudi/Gaudi/-/merge_requests/1375
140 # - https://gitlab.cern.ch/lhcb/LHCb/-/merge_requests/4773#note_9075367
141 if len(data) < 100000:
142 assert data == reference[key]
143 else:
144 same_as_reference = data == reference[key]
145 assert same_as_reference, "data is different from reference"
146 except AssertionError:
147 record_property(
148 f"{key}_diff",
150 cls._output_diff(reference[key] or "", data, cls.preprocessor),
151 "diff",
152 ),
153 )
154 reference[key] = data
155 if os.environ.get("GAUDI_TEST_IGNORE_STDOUT_VALIDATION") == "1":
156 pytest.xfail("Ignoring stdout validation")
157 raise
158 else:
159 pytest.skip("No reference file provided")
160
161 @classmethod
163 cls, output_file: str, reference_file: str, detailed=True
164 ):
165 """
166 Validate the JSON output against a reference JSON file.
167 """
168 assert os.path.isfile(output_file)
169
170 try:
171 with open(output_file) as f:
172 output = json.load(f)
173 except json.JSONDecodeError as err:
174 pytest.fail(f"json parser error in {output_file}: {err}")
175
176 lreference = cls.resolve_path(reference_file)
177 assert lreference, "reference file not set"
178 assert os.path.isfile(lreference)
179
180 try:
181 with open(lreference) as f:
182 expected = json.load(f)
183 except json.JSONDecodeError as err:
184 pytest.fail(f"JSON parser error in {lreference}: {err}")
185
186 if not detailed:
187 assert output == expected
188
189 expected = sorted(expected, key=lambda item: (item["component"], item["name"]))
190 output = sorted(output, key=lambda item: (item["component"], item["name"]))
191 assert output == expected
192
193 @classmethod
195 cls,
196 reference_block: str,
197 preprocessor: Callable = None,
198 signature: str = None,
199 signature_offset: int = 0,
200 ):
201 def assert_function(
202 cls,
203 stdout,
204 record_property,
205 preprocessor=preprocessor,
206 signature=signature,
207 signature_offset=signature_offset,
208 ):
209 processed_stdout = (
210 preprocessor(stdout.decode("utf-8"))
211 if preprocessor
212 else stdout.decode("utf-8")
213 )
214 stdout_lines = processed_stdout.strip().split("\n")
215 reference_lines = dedent(reference_block).strip().split("\n")
216
217 if signature is None and signature_offset is not None:
218 if signature_offset < 0:
219 signature_offset = len(reference_lines) + signature_offset
220 signature = reference_lines[signature_offset]
221
222 try:
223 start_index = stdout_lines.index(signature)
224 end_index = start_index + len(reference_lines)
225 observed_block = stdout_lines[start_index:end_index]
226
227 if observed_block != reference_lines:
228 diff = list(
229 difflib.unified_diff(
230 reference_lines,
231 observed_block,
232 fromfile="expected",
233 tofile="actual",
234 )
235 )
236 diff_text = "\n".join(diff)
237 record_property("block_diff", CodeWrapper(diff_text, "diff"))
238 raise AssertionError(
239 "The observed block does not match the reference."
240 )
241 except ValueError:
242 raise AssertionError(
243 f"Signature '{signature}' not found in the output."
244 )
245
246 return assert_function
247
248 @pytest.mark.do_not_collect_source
249 def test_count_messages(self, reference, stdout, record_property):
250 """
251 Test the count of error messages in the stdout against expected values.
252 """
253 expected_messages = (
254 None if reference is None else reference.get("messages_count")
255 )
256 if expected_messages is None:
257 pytest.skip()
258
259 if not isinstance(expected_messages, dict):
260 raise ValueError("reference['messages_count'] must be a dict")
261 if not expected_messages:
262 # an empty dict doesn't make sense, let's assume we are bootstrapping the reference
263 # and start from the default
264 expected_messages = NO_ERROR_MESSAGES
265 reference["messages_count"] = expected_messages
266
267 outlines = self.preprocessor(
268 stdout.decode("utf-8", errors="backslashreplace")
269 ).splitlines()
270
271 messages = {key: [] for key in expected_messages}
272 for n, line in enumerate(outlines, 1):
273 words = line.split()
274 if len(words) >= 2 and words[1] in messages:
275 messages[words[1]].append((n, line.rstrip()))
276
277 messages_count = {key: len(value) for key, value in messages.items()}
278 try:
279 assert messages_count == expected_messages
280 except AssertionError:
281 reference["messages_count"] = messages_count
282 record_property("unexpected_messages_count", messages)
283 raise
284
285 @pytest.mark.do_not_collect_source
287 self, stdout: bytes, record_property: Callable, reference: Dict
288 ) -> None:
289 """
290 Test the standard output against the reference.
291 """
292 if not self.reference or reference.get("stdout") is None:
293 pytest.skip("No stdout reference")
294
295 out = self.preprocessor(stdout.decode("utf-8", errors="backslashreplace"))
296 self.validate_with_reference(out, "stdout", reference, record_property)
297
298 @pytest.mark.do_not_collect_source
300 self, stdout: bytes, record_property: Callable, reference: Dict
301 ) -> None:
302 """
303 Test the TTree summaries against the reference.
304 """
305 if not self.reference or reference.get("ttrees") is None:
306 pytest.skip()
307
308 ttrees = filter_dict(
309 find_ttree_summaries(stdout.decode()),
310 re.compile(r"Basket|.*size|Compression"),
311 )
312 try:
313 assert ttrees == reference["ttrees"]
314 except AssertionError:
315 reference["ttrees"] = ttrees
316 if os.environ.get("GAUDI_TEST_IGNORE_STDOUT_VALIDATION") == "1":
317 pytest.xfail("Ignoring stdout validation")
318 raise
319
320 @pytest.mark.do_not_collect_source
322 self, stdout: bytes, record_property: Callable, reference: Dict
323 ) -> None:
324 """
325 Test the histogram summaries against the reference.
326 """
327 if not self.reference or reference.get("histos") is None:
328 pytest.skip()
329
330 histos = filter_dict(
331 find_histos_summaries(stdout.decode()),
332 re.compile(r"Basket|.*size|Compression"),
333 )
334 try:
335 assert histos == reference["histos"]
336 except AssertionError:
337 reference["histos"] = histos
338 if os.environ.get("GAUDI_TEST_IGNORE_STDOUT_VALIDATION") == "1":
339 pytest.xfail("Ignoring stdout validation")
340 raise
341
342 @pytest.mark.do_not_collect_source
344 self, stderr: bytes, record_property: Callable, reference: Dict
345 ) -> None:
346 """
347 Test the standard error output against the reference.
348 """
349 err = self.preprocessor(stderr.decode("utf-8", errors="backslashreplace"))
350 if self.reference and reference.get("stderr") is not None:
351 self.validate_with_reference(err, "stderr", reference, record_property)
352
353 else:
354 assert not err.strip(), "Expected no standard error output, but got some."
355
356 @pytest.mark.do_not_collect_source
357 def test_record_options(self, record_property: Callable):
358 if self.options_code:
359 record_property("options", self.options_code)
# Generated member index (from the documentation page, kept for reference):
#   List[str] _prepare_command(cls, tmp_path=Path())
#   str _output_diff(dict reference_data, str output, Callable[[str], str] preprocessor=lambda x: x)
#   None validate_with_reference(cls, bytes data, str key, Dict reference, Callable[[str, str], None] record_property)
#   validate_json_with_reference(cls, str output_file, str reference_file, detailed=True)
#   find_reference_block(cls, str reference_block, Callable preprocessor=None, str signature=None, int signature_offset=0)
#   test_count_messages(self, reference, stdout, record_property)
#   None test_stdout(self, bytes stdout, Callable record_property, Dict reference)
#   None test_ttrees(self, bytes stdout, Callable record_property, Dict reference)
#   None test_histos(self, bytes stdout, Callable record_property, Dict reference)
#   None test_stderr(self, bytes stderr, Callable record_property, Dict reference)
#   test_record_options(self, Callable record_property)