# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results with reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
# Author: Rae Moar <rmoar@google.com>

from __future__ import annotations
from dataclasses import dataclass
import re
import textwrap

from enum import Enum, auto
from typing import Iterable, Iterator, List, Optional, Tuple

from kunit_printer import stdout

class Test:
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : int - expected number of subtests (0 if single
		test case and None if unknown expected number of subtests)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0 # type: Optional[int]
		self.subtests = [] # type: List[Test]
		self.log = [] # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
			f'{self.subtests}, {self.log}, {self.counts})')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')

	def ok_status(self) -> bool:
		"""Returns true if the status was ok, i.e. passed or skipped."""
		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)

class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

@dataclass
class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.
	"""
	passed: int = 0
	failed: int = 0
	crashed: int = 0
	skipped: int = 0
	errors: int = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object."""
		statuses = [('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed), ('skipped', self.skipped),
			('errors', self.errors)]
		return f'Ran {self.total()} tests: ' + \
			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		if self.crashed:
			# Crashes should take priority.
			return TestStatus.TEST_CRASHED
		if self.failed:
			return TestStatus.FAILURE
		if self.passed:
			# No failures or crashes, looks good!
			return TestStatus.SUCCESS
		# We have only skipped tests.
		return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""Increments the count for `status`."""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1

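# Illustrative sketch (not part of the original file): how TestCounts
# aggregates statuses. Crashes take priority over failures, which take
# priority over passes; a counts object with only skips reports SKIPPED.
#
#   counts = TestCounts()
#   counts.add_status(TestStatus.SUCCESS)   # passed = 1
#   counts.add_status(TestStatus.SKIPPED)   # skipped = 1
#   counts.get_status()                     # -> TestStatus.SUCCESS
#   counts.add_status(TestStatus.FAILURE)   # failed = 1
#   counts.get_status()                     # -> TestStatus.FAILURE
#   str(counts)  # -> 'Ran 3 tests: passed: 1, failed: 1, skipped: 1'
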
class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Empties all lines stored in LineStream object into
		Iterator object and returns the Iterator object.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]

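# Illustrative sketch (not part of the original file): LineStream wraps an
# iterator of (line number, text) pairs so the parser can look ahead without
# consuming input.
#
#   stream = LineStream(iter([(1, 'KTAP version 1'), (2, '1..1')]))
#   stream.peek()         # -> 'KTAP version 1' (line is not consumed)
#   stream.pop()          # -> 'KTAP version 1' (line is consumed)
#   stream.line_number()  # -> 2
#   bool(stream)          # -> True, one line remaining
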
# Parsing helper methods:

KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
KTAP_END = re.compile(r'\s*(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')
EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')

def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip() # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove the prefix, if any.
				line = line[prefix_len:]
				yield line_num, line
			elif EXECUTOR_ERROR.search(line):
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))

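# Illustrative sketch (not part of the original file): extract_tap_lines()
# skips everything before the KTAP/TAP version line and strips that line's
# prefix (e.g. a console timestamp) from every following line.
#
#   raw = ['[ 0.1] noise', '[ 0.2] KTAP version 1', '[ 0.3] 1..1']
#   list(extract_tap_lines(raw))  # -> ['KTAP version 1', '1..1']
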
KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds an error to the test object if the version number is too high
	or too low.

	Parameters:
	version_num - the version number from the parsed KTAP or TAP
		header line
	accepted_versions - list of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(f'{version_type} version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(f'{version_type} version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if it fails to parse the KTAP/TAP header line.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	lines.pop()
	return True

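# Illustrative sketch (not part of the original file): for a header line
# 'TAP version 12', parse_ktap_header() matches TAP_START, and check_version()
# records an error because 12 < min(TAP_VERSIONS); the header line is still
# consumed and parsing continues.
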
TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if it fails to parse the test header line.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.name = match.group(1)
	lines.pop()
	return True

TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses test plan line and stores the expected number of subtests in
	test object. Reports an error if expected count is 0.
	Returns False and sets expected_count to None if there is no valid test
	plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	lines.pop()
	return True

TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if it fails to match the format or name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if the line matches a test result line and the name matches
	the expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return name == test.name

def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match expected
	test number.
	Returns False if it fails to parse the test result line.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	lines.pop()

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error(f'Expected test number {expected_num} but found {num}')

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True

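# Illustrative sketch (not part of the original file): for the line
# 'ok 1 - example_test # SKIP not supported', both TEST_RESULT and
# TEST_RESULT_SKIP match, so parse_test_result() sets name='example_test'
# and status=TestStatus.SKIPPED; a plain 'not ok 2 - other_test' would
# instead yield status=TestStatus.FAILURE.
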
def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parses lines that do not match the format of a test result line or
	test header line and returns them in a list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'
	- 'KTAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = [] # type: List[str]
	non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START]
	while lines and not any(re.match(lines.peek())
			for re in non_diagnostic_lines):
		log.append(lines.pop())
	return log


# Printing helper methods:

DIVIDER = '=' * 60

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed such that
		any characters of the color codes are not counted

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3 # default number of dashes
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2 # 2 spaces added
	if difference > 0:
		# calculate number of dashes for each side of the divider
		len_1 = int(difference / 2)
		len_2 = difference - len_1
	return ('=' * len_1) + f' {message} ' + ('=' * len_2)

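# Illustrative sketch (not part of the original file): with the 60-character
# DIVIDER, format_test_divider('example', 7) computes difference = 60 - 7 - 2
# = 51 and returns 25 '=' characters, ' example ', then 26 '=' characters.
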
def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if message != "":
		# Add a leading space before the subtest counts only if a test name
		# is provided using a "# Subtest" header line.
		message += " "
	if test.expected_count:
		if test.expected_count == 1:
			message += '(1 subtest)'
		else:
			message += f'({test.expected_count} subtests)'
	stdout.print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""Prints all strings in saved log for test in yellow."""
	formatted = textwrap.dedent('\n'.join(log))
	for line in formatted.splitlines():
		stdout.print_with_timestamp(stdout.yellow(line))

def format_test_result(test: Test) -> str:
	"""
	Returns string with formatted test result with colored status and test
	name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return stdout.green('[PASSED] ') + test.name
	if test.status == TestStatus.SKIPPED:
		return stdout.yellow('[SKIPPED] ') + test.name
	if test.status == TestStatus.NO_TESTS:
		return stdout.yellow('[NO TESTS RUN] ') + test.name
	if test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return stdout.red('[CRASHED] ') + test.name
	print_log(test.log)
	return stdout.red('[FAILED] ') + test.name

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	stdout.print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	stdout.print_with_timestamp(format_test_divider(message,
		len(message) - stdout.color_len()))


def _summarize_failed_tests(test: Test) -> str:
	"""Tries to summarize all the failing subtests in `test`."""

	def failed_names(test: Test, parent_name: str) -> List[str]:
		# Note: we use 'main' internally for the top-level test.
		if not parent_name or parent_name == 'main':
			full_name = test.name
		else:
			full_name = parent_name + '.' + test.name

		if not test.subtests: # this is a leaf node
			return [full_name]

		# If all the children failed, just say this subtest failed.
		# Don't summarize it down "the top-level test failed", though.
		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
		if parent_name and len(failed_subtests) == len(test.subtests):
			return [full_name]

		all_failures = [] # type: List[str]
		for t in failed_subtests:
			all_failures.extend(failed_names(t, full_name))
		return all_failures

	failures = failed_names(test, '')
	# If there are too many failures, printing them out will just be noisy.
	if len(failures) > 10: # this is an arbitrary limit
		return ''

	return 'Failures: ' + ', '.join(failures)

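# Illustrative sketch (not part of the original file): for a hypothetical run
# where suite_a has one failing case ('case_1') and one passing case, and
# every case in suite_b fails, _summarize_failed_tests() on the main test
# returns 'Failures: suite_a.case_1, suite_b'.
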
def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the test's subtests, or of the test itself
	if it has no subtests.

	Example:
	"Testing complete. Ran 2 tests: passed: 2"

	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = stdout.green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = stdout.yellow
	else:
		color = stdout.red
	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))

	# Summarize failures that might have gone off-screen since we had a lot
	# of tests (arbitrarily defined as >=100 for now).
	if test.ok_status() or test.counts.total() < 100:
		return
	summarized = _summarize_failed_tests(test)
	if not summarized:
		return
	stdout.print_with_timestamp(color(summarized))

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, adds the test counts of the subtests to the
	test and, if any of the subtests crashed, sets the test status to
	crashed. Otherwise, if the test has no subtests, adds the status of
	the test to the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED

def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
	"""
	Finds next test to parse in LineStream, creates new Test object,
	parses any subtests of the test, populates Test object with all
	information (status, name) about the test and the Test objects for
	any subtests, and then returns the Test object. The method accepts
	three formats of tests:

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header (must include either the KTAP version line or
	"# Subtest" header line)

	Example (preferred format with both KTAP version line and
	"# Subtest" line):

	KTAP version 1
	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	Example (only "# Subtest" line):

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	Example (only KTAP version line, compliant with KTAP v1 spec):

	KTAP version 1
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test
	is_subtest - boolean indicating whether test is a subtest

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)

	# Parse any errors prior to parsing tests
	err_log = parse_diagnostic(lines)
	test.log.extend(err_log)

	if not is_subtest:
		# If parsing the main/top-level test, parse KTAP version line and
		# test plan
		test.name = "main"
		ktap_line = parse_ktap_header(lines, test)
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If not the main test, attempt to parse a test header containing
		# the KTAP version line and/or subtest header line
		ktap_line = parse_ktap_header(lines, test)
		subtest_line = parse_test_header(lines, test)
		parent_test = (ktap_line or subtest_line)
		if parent_test:
			# If KTAP version line and/or subtest header is found, attempt
			# to parse test plan and print test header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing expected number of tests or
		# if expected number of tests is unknown break when test
		# result line with matching name to subtest header is found
		# or no more lines in stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				is_subtest):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
			break
		else:
			sub_test = parse_test(lines, test_num, sub_log, True)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if is_subtest:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if test.name != "" and not peek_test_name_match(lines, test):
			test.add_error('missing subtest result line!')
		else:
			parse_test_result(lines, test, expected_num)

	# Check for there being no subtests within parent test
	if parent_test and len(subtests) == 0:
		# Don't override a bad status if this test had one reported.
		# Assumption: no subtests means CRASHED is from Test.__init__()
		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
			print_log(test.log)
			test.status = TestStatus.NO_TESTS
			test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and is_subtest:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif is_subtest:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Using kernel output, extracts KTAP lines, parses the lines for test
	results and prints condensed test results and a summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	stdout.print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [], False)
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	stdout.print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test
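
# Illustrative sketch (not part of the original file): kunit.py normally feeds
# raw kernel console output into parse_run_tests(); a stand-alone call with
# hypothetical KTAP output could look like this.
#
#   output = [
#       'KTAP version 1',
#       '1..1',
#       '    KTAP version 1',
#       '    # Subtest: suite',
#       '    1..1',
#       '    ok 1 case',
#       'ok 1 suite',
#   ]
#   result = parse_run_tests(output)
#   result.status          # -> TestStatus.SUCCESS
#   result.counts.passed   # -> 1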