# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results in a reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <[email protected]>
# Author: Brendan Higgins <[email protected]>
# Author: Rae Moar <[email protected]>

from __future__ import annotations
import re

from collections import namedtuple
from datetime import datetime
from enum import Enum, auto
from typing import Iterable, Iterator, List, Optional, Tuple

TestResult = namedtuple('TestResult', ['status', 'test', 'log'])
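# parse_run_tests() returns a TestResult tuple:
#   status - aggregated TestStatus of the main Test object
#   test   - the main Test object, with all parsed subtests attached
#   log    - LineStream of the KTAP lines extracted from the kernel output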

class Test:
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : int - expected number of subtests (0 if single
		test case and None if unknown expected number of subtests)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0  # type: Optional[int]
		self.subtests = []  # type: List[Test]
		self.log = []  # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return ('Test(' + str(self.status) + ', ' + self.name +
			', ' + str(self.expected_count) + ', ' +
			str(self.subtests) + ', ' + str(self.log) + ', ' +
			str(self.counts) + ')')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		print_error('Test ' + self.name + ': ' + error_message)
class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.

	Attributes:
	passed : int - the number of tests that have passed
	failed : int - the number of tests that have failed
	crashed : int - the number of tests that have crashed
	skipped : int - the number of tests that have been skipped
	errors : int - the number of errors in the test and subtests
	"""
	def __init__(self):
		"""Creates TestCounts object with counts of all test
		statuses and test errors set to 0.
		"""
		self.passed = 0
		self.failed = 0
		self.crashed = 0
		self.skipped = 0
		self.errors = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object.
		"""
		return ('Passed: ' + str(self.passed) +
			', Failed: ' + str(self.failed) +
			', Crashed: ' + str(self.crashed) +
			', Skipped: ' + str(self.skipped) +
			', Errors: ' + str(self.errors))

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a TestCounts object whose counts will be added to
			the counts of this TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		elif self.crashed:
			# If any subtest crashed, the expected status
			# of the Test is crashed.
			return TestStatus.TEST_CRASHED
		elif self.failed:
			# Otherwise if any subtest failed, the expected
			# status of the Test is failed.
			return TestStatus.FAILURE
		elif self.passed:
			# Otherwise if any subtest passed, the expected
			# status of the Test is passed.
			return TestStatus.SUCCESS
		else:
			# Finally, if none of the subtests have failed,
			# crashed, or passed, the expected status of the
			# Test is skipped.
			return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""
		Increments the count of the given status.

		Parameters:
		status - status to be added to the TestCounts object
		"""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1
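
# For example, after add_status(TestStatus.SUCCESS) and
# add_status(TestStatus.FAILURE), total() is 2 and get_status() returns
# TestStatus.FAILURE, since any crash or failure outweighs passed subtests.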

class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Empties all lines stored in LineStream object into
		Iterator object and returns the Iterator object.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]
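
# Example (a sketch over a hypothetical two-line stream):
#
#   stream = LineStream(iter([(1, 'KTAP version 1'), (2, '1..1')]))
#   stream.peek()         # -> 'KTAP version 1' (does not advance)
#   stream.pop()          # -> 'KTAP version 1' (advances to the next line)
#   stream.line_number()  # -> 2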

# Parsing helper methods:

KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
TAP_START = re.compile(r'TAP version ([0-9]+)$')
KTAP_END = re.compile('(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')

def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip()  # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove prefix and any indentation and yield
				# line with line number
				line = line[prefix_len:].lstrip()
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))
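
# Example (a sketch with a hypothetical timestamped console log):
#
#   output = ['[ 12.3] KTAP version 1', '[ 12.3] 1..1', '[ 12.3]   ok 1 - t']
#   stream = extract_tap_lines(output)
#   stream.pop()  # -> 'KTAP version 1' (timestamp prefix stripped)
#   stream.pop()  # -> '1..1'
#   stream.pop()  # -> 'ok 1 - t' (indentation stripped as well)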

KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds an error to the test object if the version number is too high
	or too low.

	Parameters:
	version_num - the version number from the parsed KTAP or TAP
		header line
	accepted_versions - list of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(version_type +
			' version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(
			version_type + ' version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if the KTAP/TAP header line cannot be parsed.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	test.log.append(lines.pop())
	return True

TEST_HEADER = re.compile(r'^# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if the test header line cannot be parsed.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.log.append(lines.pop())
	test.name = match.group(1)
	return True

TEST_PLAN = re.compile(r'1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses test plan line and stores the expected number of subtests in
	test object.
	Returns False and sets expected_count to None if there is no valid test
	plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	test.log.append(lines.pop())
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	return True

TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if the line does not match the format or the name does
	not match.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if the line matches a test result line and the name matches the
		expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return (name == test.name)

def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match expected
	test number.
	Returns False if the test result line cannot be parsed.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	test.log.append(lines.pop())

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error('Expected test number ' +
			str(expected_num) + ' but found ' + str(num))

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True
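
# For example, 'not ok 1 - example_test # SKIP not supported' (a hypothetical
# result line) matches both TEST_RESULT and TEST_RESULT_SKIP, so the test is
# named 'example_test' and marked SKIPPED rather than FAILURE.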

def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parses lines that do not match the format of a test result line or
	test header line and returns them in a list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = []  # type: List[str]
	while lines and not TEST_RESULT.match(lines.peek()) and not \
			TEST_HEADER.match(lines.peek()):
		log.append(lines.pop())
	return log

DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')

def parse_crash_in_log(test: Test) -> bool:
	"""
	Iterates through the lines of the log to check for a crash message.
	If a crash message is found, sets the status to crashed and returns
	True. Otherwise returns False.

	Parameters:
	test - Test object for current test being parsed

	Return:
	True if crash message found in log
	"""
	for line in test.log:
		if DIAGNOSTIC_CRASH_MESSAGE.match(line):
			test.status = TestStatus.TEST_CRASHED
			return True
	return False


# Printing helper methods:

DIVIDER = '=' * 60

RESET = '\033[0;0m'

def red(text: str) -> str:
	"""Returns the input string wrapped in a red ANSI color code."""
	return '\033[1;31m' + text + RESET

def yellow(text: str) -> str:
	"""Returns the input string wrapped in a yellow ANSI color code."""
	return '\033[1;33m' + text + RESET

def green(text: str) -> str:
	"""Returns the input string wrapped in a green ANSI color code."""
	return '\033[1;32m' + text + RESET

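# Length of the ANSI color codes added by red()/yellow()/green(); used when
# computing the printable width of a colored message.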
ANSI_LEN = len(red(''))

def print_with_timestamp(message: str) -> None:
	"""Prints message with timestamp at beginning."""
	print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed, excluding any
		characters from color codes

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3  # default number of '=' on each side
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2  # 2 spaces added
	if difference > 0:
		# calculate number of '=' for each side of the divider
		len_1 = int(difference / 2)
		len_2 = difference - len_1
	return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)

def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if test.expected_count:
		if test.expected_count == 1:
			message += (' (' + str(test.expected_count) +
				' subtest)')
		else:
			message += (' (' + str(test.expected_count) +
				' subtests)')
	print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""
	Prints all strings in saved log for test in yellow.

	Parameters:
	log - Iterable object with all strings saved in log for test
	"""
	for m in log:
		print_with_timestamp(yellow(m))

def format_test_result(test: Test) -> str:
	"""
	Returns a string containing the formatted test result, with a colored
	status and the test name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return (green('[PASSED] ') + test.name)
	elif test.status == TestStatus.SKIPPED:
		return (yellow('[SKIPPED] ') + test.name)
	elif test.status == TestStatus.NO_TESTS:
		return (yellow('[NO TESTS RUN] ') + test.name)
	elif test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return (red('[CRASHED] ') + test.name)
	else:
		print_log(test.log)
		return (red('[FAILED] ') + test.name)

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	print_with_timestamp(format_test_divider(message,
		len(message) - ANSI_LEN))

def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. The color of the line depends on
	the status of the test: green if the test passed, yellow if it was
	skipped, and red if it failed or crashed. The summary line contains
	the counts of the statuses of the test's subtests, or of the test
	itself if it has no subtests.

	Example:
	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
	Errors: 0"

	Parameters:
	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = green
	elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS:
		color = yellow
	else:
		color = red
	counts = test.counts
	print_with_timestamp(color('Testing complete. ' + str(counts)))

def print_error(error_message: str) -> None:
	"""
	Prints error message with error format.

	Example:
	"[ERROR] Test example: missing test plan!"

	Parameters:
	error_message - message describing error
	"""
	print_with_timestamp(red('[ERROR] ') + error_message)

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, adds the counts of the subtests to the
	test's counts and, if any of the subtests crashed, sets the test
	status to crashed. Otherwise, if the test has no subtests, adds the
	status of the test to the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	parse_crash_in_log(test)
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED

def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
	"""
	Finds the next test to parse in the LineStream, creates a new Test
	object, parses any subtests of the test, populates the Test object
	with all information (status, name) about the test and the Test
	objects for any subtests, and then returns the Test object. The
	function accepts three formats of tests:

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header line

	Example:

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)
	parent_test = False
	main = parse_ktap_header(lines, test)
	if main:
		# If KTAP/TAP header is found, attempt to parse
		# test plan
		test.name = "main"
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If KTAP/TAP header is not found, test must be subtest
		# header or test result line, so attempt to parse the
		# subtest header
		parent_test = parse_test_header(lines, test)
		if parent_test:
			# If subtest header is found, attempt to parse
			# test plan and print header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing expected number of tests or
		# if expected number of tests is unknown break when test
		# result line with matching name to subtest header is found
		# or no more lines in stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				not main):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
				break
		else:
			sub_test = parse_test(lines, test_num, sub_log)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if not main:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if (parent_test and peek_test_name_match(lines, test)) or \
				not parent_test:
			parse_test_result(lines, test, expected_num)
		else:
			test.add_error('missing subtest result line!')

	# Check for there being no tests
	if parent_test and len(subtests) == 0:
		test.status = TestStatus.NO_TESTS
		test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and not main:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif not main:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
	"""
	Using kernel output, extracts KTAP lines, parses the lines for test
	results, and prints condensed test results and a summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	TestResult - Tuple containing status of main test object, main test
		object with all subtests, and log of all KTAP lines.
	"""
	print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.add_error('invalid KTAP input!')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [])
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return TestResult(test.status, test, lines)
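
# Minimal, optional sketch (an illustration only; kunit.py normally imports
# and drives this module) showing the parser run directly on KTAP output
# piped in on stdin.
if __name__ == '__main__':
	import sys

	result = parse_run_tests(sys.stdin)
	sys.exit(0 if result.status == TestStatus.SUCCESS else 1)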