# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results with reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <[email protected]>
# Author: Brendan Higgins <[email protected]>
# Author: Rae Moar <[email protected]>

from __future__ import annotations
from dataclasses import dataclass
import re
import sys

from enum import Enum, auto
from typing import Iterable, Iterator, List, Optional, Tuple

from kunit_printer import stdout

class Test:
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : int - expected number of subtests (0 if single
		test case and None if unknown expected number of subtests)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0 # type: Optional[int]
		self.subtests = [] # type: List[Test]
		self.log = [] # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
			f'{self.subtests}, {self.log}, {self.counts})')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')

	def ok_status(self) -> bool:
		"""Returns true if the status was ok, i.e. passed or skipped."""
		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)

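# Illustrative example (comment only, not executed): parsed results form a
# tree of Test objects, with suites and cases stored as subtests of a
# top-level "main" Test. For a run with a single passing case one would
# roughly expect:
#
#   result = parse_run_tests(kernel_output)
#   result.status          # TestStatus.SUCCESS
#   result.counts.total()  # 1
#   result.ok_status()     # True
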
class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

@dataclass
class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.
	"""
	passed: int = 0
	failed: int = 0
	crashed: int = 0
	skipped: int = 0
	errors: int = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object."""
		statuses = [('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed), ('skipped', self.skipped),
			('errors', self.errors)]
		return f'Ran {self.total()} tests: ' + \
			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		if self.crashed:
			# Crashes should take priority.
			return TestStatus.TEST_CRASHED
		if self.failed:
			return TestStatus.FAILURE
		if self.passed:
			# No failures or crashes, looks good!
			return TestStatus.SUCCESS
		# We have only skipped tests.
		return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""Increments the count for `status`."""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1

class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Drains all remaining lines from the LineStream and yields
		them one at a time.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]

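# Illustrative example (comment only, not executed): a LineStream wraps an
# iterator of (line number, text) tuples and is consumed with peek()/pop():
#
#   stream = LineStream(iter([(1, 'KTAP version 1'), (2, '1..1')]))
#   stream.peek()         # 'KTAP version 1' (does not advance)
#   stream.pop()          # 'KTAP version 1' (advances)
#   stream.line_number()  # 2
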
# Parsing helper methods:

KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
TAP_START = re.compile(r'TAP version ([0-9]+)$')
KTAP_END = re.compile('(List of all partitions:|'
		      'Kernel panic - not syncing: VFS:|reboot: System halted)')

def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip() # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove the prefix and optionally any leading
				# whitespace. Our parsing logic relies on this.
				line = line[prefix_len:]
				if lstrip:
					line = line.lstrip()
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))

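# Illustrative example (comment only): given console output such as
#
#   [    0.420000] KTAP version 1
#   [    0.420000] 1..1
#   [    0.430000] ok 1 example
#
# extract_tap_lines() skips everything before the version line, strips the
# fixed-width '[    0.420000] ' prefix from the following lines, and yields
# 'KTAP version 1', '1..1' and 'ok 1 example' along with their line numbers.
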
KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds error to test object if version number is too high or too
	low.

	Parameters:
	version_num - The version number from the parsed KTAP or TAP
		header line
	accepted_versions - List of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(f'{version_type} version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(f'{version_type} version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if fails to parse KTAP/TAP header line.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	test.log.append(lines.pop())
	return True

TEST_HEADER = re.compile(r'^# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if fails to parse test header line.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.log.append(lines.pop())
	test.name = match.group(1)
	return True

TEST_PLAN = re.compile(r'1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses test plan line and stores the expected number of subtests in
	test object. Reports an error if expected count is 0.
	Returns False and sets expected_count to None if there is no valid test
	plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	test.log.append(lines.pop())
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	return True

TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if fails to match format or name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if the line matches the test result format and the name
	matches the expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return name == test.name

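# Illustrative example (comment only): 'ok 1 - example_test' matches
# TEST_RESULT with status 'ok', number '1' and name 'example_test', while
# 'not ok 2 - example_test # SKIP' also matches TEST_RESULT_SKIP, so
# parse_test_result() below records it as SKIPPED regardless of the
# ok/not ok status.
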
def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match expected
	test number.
	Returns False if fails to parse test result line.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	test.log.append(lines.pop())

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error(f'Expected test number {expected_num} but found {num}')

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True

def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parses lines that do not match the format of a test result line or
	test header line and returns them in a list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = [] # type: List[str]
	while lines and not TEST_RESULT.match(lines.peek()) and not \
			TEST_HEADER.match(lines.peek()):
		log.append(lines.pop())
	return log


# Printing helper methods:

DIVIDER = '=' * 60

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed, excluding any
		color code characters

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3 # default padding length on each side
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2 # 2 spaces added
	if difference > 0:
		# calculate padding length for each side of the divider
		len_1 = int(difference / 2)
		len_2 = difference - len_1
	return ('=' * len_1) + f' {message} ' + ('=' * len_2)

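# Illustrative example (comment only): for the 60-character divider and the
# 20-character message 'example (2 subtests)', difference = 60 - 20 - 2 = 38,
# so format_test_divider() pads the message with 19 '=' on each side.
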
def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if test.expected_count:
		if test.expected_count == 1:
			message += ' (1 subtest)'
		else:
			message += f' ({test.expected_count} subtests)'
	stdout.print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""Prints all strings in saved log for test in yellow."""
	for m in log:
		stdout.print_with_timestamp(stdout.yellow(m))

def format_test_result(test: Test) -> str:
	"""
	Returns string with formatted test result with colored status and test
	name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return stdout.green('[PASSED] ') + test.name
	if test.status == TestStatus.SKIPPED:
		return stdout.yellow('[SKIPPED] ') + test.name
	if test.status == TestStatus.NO_TESTS:
		return stdout.yellow('[NO TESTS RUN] ') + test.name
	if test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return stdout.red('[CRASHED] ') + test.name
	print_log(test.log)
	return stdout.red('[FAILED] ') + test.name

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	stdout.print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	stdout.print_with_timestamp(format_test_divider(message,
		len(message) - stdout.color_len()))


def _summarize_failed_tests(test: Test) -> str:
	"""Tries to summarize all the failing subtests in `test`."""

	def failed_names(test: Test, parent_name: str) -> List[str]:
		# Note: we use 'main' internally for the top-level test.
		if not parent_name or parent_name == 'main':
			full_name = test.name
		else:
			full_name = parent_name + '.' + test.name

		if not test.subtests: # this is a leaf node
			return [full_name]

		# If all the children failed, just say this subtest failed.
		# Don't summarize it down to "the top-level test failed", though.
		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
		if parent_name and len(failed_subtests) == len(test.subtests):
			return [full_name]

		all_failures = [] # type: List[str]
		for t in failed_subtests:
			all_failures.extend(failed_names(t, full_name))
		return all_failures

	failures = failed_names(test, '')
	# If there are too many failures, printing them out will just be noisy.
	if len(failures) > 10: # this is an arbitrary limit
		return ''

	return 'Failures: ' + ', '.join(failures)

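# Illustrative example (comment only): if 'suite.case_b' and 'suite.case_c'
# fail while 'suite.case_a' passes, the summary reads
# 'Failures: suite.case_b, suite.case_c'; if every case in 'suite' fails, it
# collapses to 'Failures: suite'.
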
def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the test's subtests, or of the test itself
	if it has no subtests.

	Example:
	"Testing complete. Ran 2 tests: passed: 2"

	Parameters:
	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = stdout.green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = stdout.yellow
	else:
		color = stdout.red
	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))

	# Summarize failures that might have gone off-screen since we had a lot
	# of tests (arbitrarily defined as >=100 for now).
	if test.ok_status() or test.counts.total() < 100:
		return
	summarized = _summarize_failed_tests(test)
	if not summarized:
		return
	stdout.print_with_timestamp(color(summarized))

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, adds the counts of the subtests to the test
	and, if any of the subtests crashed, sets the test status to crashed.
	Otherwise, if the test has no subtests, adds the status of the test to
	the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED

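# Illustrative example (comment only): a parent whose subtests report
# TestCounts(passed=2) and TestCounts(failed=1) aggregates to
# 'Ran 3 tests: passed: 2, failed: 1' with FAILURE status, while a parent
# whose only subtest crashed bubbles up as TEST_CRASHED.
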
def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
	"""
	Finds next test to parse in LineStream, creates new Test object,
	parses any subtests of the test, populates Test object with all
	information (status, name) about the test and the Test objects for
	any subtests, and then returns the Test object. The method accepts
	three formats of tests:

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header line

	Example:

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)
	parent_test = False
	main = parse_ktap_header(lines, test)
	if main:
		# If KTAP/TAP header is found, attempt to parse
		# test plan
		test.name = "main"
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If KTAP/TAP header is not found, test must be a subtest
		# header or test result line, so attempt to parse the
		# subtest header
		parent_test = parse_test_header(lines, test)
		if parent_test:
			# If subtest header is found, attempt to parse
			# test plan and print header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing the expected number of tests, or, if
		# the expected number of tests is unknown, break when a test
		# result line matching the name of the subtest header is
		# found or there are no more lines in the stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				not main):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
				break
		else:
			sub_test = parse_test(lines, test_num, sub_log)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if not main:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if (parent_test and peek_test_name_match(lines, test)) or \
				not parent_test:
			parse_test_result(lines, test, expected_num)
		else:
			test.add_error('missing subtest result line!')

	# Check for there being no tests
	if parent_test and len(subtests) == 0:
		# Don't override a bad status if this test had one reported.
		# Assumption: no subtests means CRASHED is from Test.__init__()
		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
			test.status = TestStatus.NO_TESTS
			test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and not main:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif not main:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Extracts KTAP lines from the kernel output, parses the lines for test
	results, and prints condensed test results and a summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	stdout.print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('could not find any KTAP output!')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [])
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	stdout.print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test

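# Illustrative usage sketch (comment only; kunit.py is the normal entry
# point, and the log file name below is hypothetical):
#
#   with open('kunit_run.log') as f:
#       result = parse_run_tests(f)
#   if result.ok_status():
#       ...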