# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results with reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <[email protected]>
# Author: Brendan Higgins <[email protected]>
# Author: Rae Moar <[email protected]>

from __future__ import annotations
import re
import sys

from enum import Enum, auto
from typing import Iterable, Iterator, List, Optional, Tuple

from kunit_printer import stdout

class Test:
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : Optional[int] - expected number of subtests (0 if
		a single test case, None if the expected number of subtests
		is unknown)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0  # type: Optional[int]
		self.subtests = []  # type: List[Test]
		self.log = []  # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
			f'{self.subtests}, {self.log}, {self.counts})')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')

	def ok_status(self) -> bool:
		"""Returns true if the status was ok, i.e. passed or skipped."""
		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)

class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.

	Attributes:
	passed : int - the number of tests that have passed
	failed : int - the number of tests that have failed
	crashed : int - the number of tests that have crashed
	skipped : int - the number of tests that have been skipped
	errors : int - the number of errors in the test and subtests
	"""
	def __init__(self):
		"""Creates TestCounts object with counts of all test
		statuses and test errors set to 0.
		"""
		self.passed = 0
		self.failed = 0
		self.crashed = 0
		self.skipped = 0
		self.errors = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object."""
		statuses = [('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed), ('skipped', self.skipped),
			('errors', self.errors)]
		return f'Ran {self.total()} tests: ' + \
			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		if self.crashed:
			# Crashes should take priority.
			return TestStatus.TEST_CRASHED
		if self.failed:
			return TestStatus.FAILURE
		if self.passed:
			# No failures or crashes, looks good!
			return TestStatus.SUCCESS
		# We have only skipped tests.
		return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""Increments the count for `status`."""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1
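
# Illustrative sketch (not part of the upstream file) of how TestCounts
# aggregates: crashes outrank failures, which outrank passes, which outrank
# skips.
#
#   counts = TestCounts()
#   counts.add_status(TestStatus.SUCCESS)
#   counts.add_status(TestStatus.SKIPPED)
#   assert counts.get_status() == TestStatus.SUCCESS  # passes outrank skips
#   counts.add_status(TestStatus.FAILURE)
#   assert counts.get_status() == TestStatus.FAILURE  # failures outrank passes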

class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Empties all lines stored in LineStream object into
		Iterator object and returns the Iterator object.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]

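# A minimal usage sketch for LineStream (illustrative, not upstream code):
# peek() looks at the current line without consuming it, pop() consumes it,
# and the stream is falsy once the underlying iterator is exhausted.
#
#   stream = LineStream(iter([(1, 'KTAP version 1'), (2, '1..1')]))
#   assert stream.peek() == 'KTAP version 1'  # still current
#   assert stream.pop() == 'KTAP version 1'   # now consumed
#   assert stream.line_number() == 2
#   stream.pop()
#   assert not stream                         # exhausted
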
# Parsing helper methods:

KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
TAP_START = re.compile(r'TAP version ([0-9]+)$')
KTAP_END = re.compile('(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')

def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip()  # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove the prefix and optionally any leading
				# whitespace. Our parsing logic relies on this.
				line = line[prefix_len:]
				if lstrip:
					line = line.lstrip()
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))
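
# Sketch of extract_tap_lines() on console output (illustrative, not upstream
# code): the prefix before 'KTAP version' (here a fixed-width timestamp) is
# measured once, then stripped from every following line.
#
#   dmesg = ['[  0.1] KTAP version 1',
#            '[  0.2] 1..1',
#            '[  0.3] ok 1 example',
#            '[  0.4] reboot: System halted']
#   assert list(extract_tap_lines(dmesg)) == \
#       ['KTAP version 1', '1..1', 'ok 1 example']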

KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds error to test object if version number is too high or too
	low.

	Parameters:
	version_num - The inputted version number from the parsed KTAP or TAP
		header line
	accepted_versions - List of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(f'{version_type} version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(f'{version_type} version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if fails to parse KTAP/TAP header line.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	test.log.append(lines.pop())
	return True

TEST_HEADER = re.compile(r'^# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if fails to parse test header line.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.log.append(lines.pop())
	test.name = match.group(1)
	return True

TEST_PLAN = re.compile(r'1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses test plan line and stores the expected number of subtests in
	test object.
	Returns False and sets expected_count to None if there is no valid
	test plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	test.log.append(lines.pop())
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	return True

TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
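
# Illustrative sketch (not upstream code) of what the result regexes capture:
# group(1) is the verdict, group(2) the test number, group(4) the test name,
# and TEST_RESULT_SKIP additionally captures the text after '# SKIP'.
#
#   m = TEST_RESULT.match('ok 1 - example')
#   assert (m.group(1), m.group(2), m.group(4)) == ('ok', '1', 'example')
#   s = TEST_RESULT_SKIP.match('ok 2 - example # SKIP not supported')
#   assert (s.group(4), s.group(5)) == ('example', ' not supported')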

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if fails to match format or name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if matched a test result line and the name matches the
		expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return name == test.name

def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match the
	expected test number.
	Returns False if fails to parse test result line.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	test.log.append(lines.pop())

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error(f'Expected test number {expected_num} but found {num}')

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True

def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parses lines that do not match the format of a test result line or
	test header line and returns them in a list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = []  # type: List[str]
	while lines and not TEST_RESULT.match(lines.peek()) and not \
			TEST_HEADER.match(lines.peek()):
		log.append(lines.pop())
	return log


# Printing helper methods:

DIVIDER = '=' * 60

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed such that
		any characters of the color codes are not counted

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3  # default number of '=' characters on each side
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2  # 2 spaces added
	if difference > 0:
		# calculate number of '=' for each side of the divider
		len_1 = difference // 2
		len_2 = difference - len_1
	return ('=' * len_1) + f' {message} ' + ('=' * len_2)
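
# Worked example for format_test_divider() (illustrative, not upstream code):
# with a 60-character divider and a 7-character message, the remaining
# 60 - 7 - 2 = 51 characters split into 25 '=' on the left and 26 on the
# right, so the result is always exactly len(DIVIDER) characters wide.
#
#   line = format_test_divider('example', len('example'))
#   assert len(line) == len(DIVIDER) == 60
#   assert line == '=' * 25 + ' example ' + '=' * 26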

def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if test.expected_count:
		if test.expected_count == 1:
			message += ' (1 subtest)'
		else:
			message += f' ({test.expected_count} subtests)'
	stdout.print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""Prints all strings in saved log for test in yellow."""
	for m in log:
		stdout.print_with_timestamp(stdout.yellow(m))

def format_test_result(test: Test) -> str:
	"""
	Returns string with formatted test result with colored status and test
	name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return stdout.green('[PASSED] ') + test.name
	if test.status == TestStatus.SKIPPED:
		return stdout.yellow('[SKIPPED] ') + test.name
	if test.status == TestStatus.NO_TESTS:
		return stdout.yellow('[NO TESTS RUN] ') + test.name
	if test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return stdout.red('[CRASHED] ') + test.name
	print_log(test.log)
	return stdout.red('[FAILED] ') + test.name

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	stdout.print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	stdout.print_with_timestamp(format_test_divider(message,
		len(message) - stdout.color_len()))


def _summarize_failed_tests(test: Test) -> str:
	"""Tries to summarize all the failing subtests in `test`."""

	def failed_names(test: Test, parent_name: str) -> List[str]:
		# Note: we use 'main' internally for the top-level test.
		if not parent_name or parent_name == 'main':
			full_name = test.name
		else:
			full_name = parent_name + '.' + test.name

		if not test.subtests:  # this is a leaf node
			return [full_name]

		# If all the children failed, just say this subtest failed.
		# Don't summarize it down to "the top-level test failed",
		# though.
		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
		if parent_name and len(failed_subtests) == len(test.subtests):
			return [full_name]

		all_failures = []  # type: List[str]
		for t in failed_subtests:
			all_failures.extend(failed_names(t, full_name))
		return all_failures

	failures = failed_names(test, '')
	# If there are too many failures, printing them out will just be noisy.
	if len(failures) > 10:  # this is an arbitrary limit
		return ''

	return 'Failures: ' + ', '.join(failures)
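
# Illustrative sketch (not upstream code) of the summary format: leaf names
# are joined to their parents with dots, the internal 'main' name is omitted,
# and a parent whose subtests all failed is reported once by its own name.
#
#   ok = Test(); ok.name = 'good'; ok.status = TestStatus.SUCCESS
#   bad = Test(); bad.name = 'bad'  # status defaults to TEST_CRASHED
#   suite = Test(); suite.name = 'suite'; suite.subtests = [ok, bad]
#   main = Test(); main.name = 'main'; main.subtests = [suite]
#   assert _summarize_failed_tests(main) == 'Failures: suite.bad'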


def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the test's subtests, or of the test itself
	if it has no subtests.

	Example:
	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
	Errors: 0"

	Parameters:
	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = stdout.green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = stdout.yellow
	else:
		color = stdout.red
	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))

	# Summarize failures that might have gone off-screen since we had a lot
	# of tests (arbitrarily defined as >=100 for now).
	if test.ok_status() or test.counts.total() < 100:
		return
	summarized = _summarize_failed_tests(test)
	if not summarized:
		return
	stdout.print_with_timestamp(color(summarized))

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, adds the test counts of the subtests to the
	test and, if any of the subtests crashed, sets the test status to
	crashed. Otherwise, if the test has no subtests, adds the status of
	the test to the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED
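
# Illustrative sketch (not upstream code): a crashed subtest propagates its
# status to the parent through the aggregated counts.
#
#   parent = Test(); parent.status = TestStatus.SUCCESS
#   child = Test()  # status defaults to TEST_CRASHED
#   child.counts.add_status(child.status)
#   parent.subtests = [child]
#   bubble_up_test_results(parent)
#   assert parent.status == TestStatus.TEST_CRASHED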

def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
	"""
	Finds the next test to parse in the LineStream, creates a new Test
	object, parses any subtests of the test, populates the Test object
	with all information (status, name) about the test and the Test
	objects for any subtests, and then returns the Test object.

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header line

	Example:

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)
	parent_test = False
	main = parse_ktap_header(lines, test)
	if main:
		# If KTAP/TAP header is found, attempt to parse
		# test plan
		test.name = "main"
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If KTAP/TAP header is not found, test must be subtest
		# header or test result line, so attempt to parse
		# subtest header
		parent_test = parse_test_header(lines, test)
		if parent_test:
			# If subtest header is found, attempt to parse
			# test plan and print header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing the expected number of tests, or, if
		# the expected number of tests is unknown, break when a test
		# result line matching the name of the subtest header is
		# found or there are no more lines in the stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				not main):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
				break
		else:
			sub_test = parse_test(lines, test_num, sub_log)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if not main:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if (parent_test and peek_test_name_match(lines, test)) or \
				not parent_test:
			parse_test_result(lines, test, expected_num)
		else:
			test.add_error('missing subtest result line!')

	# Check for there being no tests
	if parent_test and len(subtests) == 0:
		# Don't override a bad status if this test had one reported.
		# Assumption: no subtests means CRASHED is from Test.__init__()
		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
			test.status = TestStatus.NO_TESTS
			test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and not main:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif not main:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Using kernel output, extracts KTAP lines, parses the lines for test
	results, and prints condensed test results and a summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	stdout.print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('could not find any KTAP output!')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [])
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	stdout.print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test
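
# End-to-end usage sketch (illustrative, not upstream code): feed raw console
# lines to parse_run_tests() and inspect the returned main Test object.
#
#   output = ['KTAP version 1',
#             '1..1',
#             'ok 1 example_test',
#             'reboot: System halted']
#   result = parse_run_tests(output)
#   assert result.status == TestStatus.SUCCESS
#   assert result.counts.passed == 1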