1# SPDX-License-Identifier: GPL-2.0
2#
3# Parses KTAP test results from a kernel dmesg log and incrementally prints
4# results with reader-friendly format. Stores and returns test results in a
5# Test object.
6#
7# Copyright (C) 2019, Google LLC.
8# Author: Felix Guo <[email protected]>
9# Author: Brendan Higgins <[email protected]>
10# Author: Rae Moar <[email protected]>
11
12from __future__ import annotations
13import re
14import sys
15
16import datetime
17from enum import Enum, auto
18from functools import reduce
19from typing import Iterable, Iterator, List, Optional, Tuple
20
class Test(object):
	"""
	Represents a single test parsed from KTAP results. All KTAP
	results within a test log are stored in one main Test object as
	a tree of subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : int - expected number of subtests (0 for a
		single test case, None when the expected number of
		subtests is unknown)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the statuses and errors of
		subtests, or of the test itself if it is a single test
		case
	"""
	def __init__(self) -> None:
		"""Initializes all attributes to their defaults."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0  # type: Optional[int]
		self.subtests = []  # type: List[Test]
		self.log = []  # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return (f'Test({self.status}, {self.name}, '
			f'{self.expected_count}, {self.subtests}, '
			f'{self.log}, {self.counts})')

	def __repr__(self) -> str:
		"""Delegates to __str__ for debugging output."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		print_error(f'Test {self.name}: {error_message}')
62
class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()  # test passed
	FAILURE = auto()  # test failed (or a subtest failed)
	SKIPPED = auto()  # test was skipped via the SKIP directive
	TEST_CRASHED = auto()  # test crashed before reporting a result
	NO_TESTS = auto()  # no test cases were run
	FAILURE_TO_PARSE_TESTS = auto()  # KTAP output could not be parsed
71
class TestCounts:
	"""
	Tracks how many test cases within a Test finished with each
	status, along with the number of errors hit while parsing.

	Attributes:
	passed : int - the number of tests that have passed
	failed : int - the number of tests that have failed
	crashed : int - the number of tests that have crashed
	skipped : int - the number of tests that have skipped
	errors : int - the number of errors in the test and subtests
	"""
	def __init__(self):
		"""Initializes every counter to 0."""
		self.passed = 0
		self.failed = 0
		self.crashed = 0
		self.skipped = 0
		self.errors = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object.
		"""
		stats = (('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed),
			('skipped', self.skipped), ('errors', self.errors))
		# only mention counters that are non-zero
		body = ', '.join(f'{label}: {count}'
			for label, count in stats if count > 0)
		return f'Ran {self.total()} tests: ' + body

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return sum((self.passed, self.failed, self.crashed,
			self.skipped))

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Accumulates the counters of a subtest's TestCounts object
		into this (parent) TestCounts object.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		if self.crashed:
			# a single crashed subtest marks the whole Test
			# as crashed
			return TestStatus.TEST_CRASHED
		if self.failed:
			# otherwise any failed subtest marks the Test as
			# failed
			return TestStatus.FAILURE
		if self.passed:
			# otherwise any passed subtest marks the Test as
			# passed
			return TestStatus.SUCCESS
		# nothing crashed, failed, or passed: all were skipped
		return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""
		Increments the counter matching the given status.

		Parameters:
		status - status to be added to the TestCounts object
		"""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			# TEST_CRASHED and FAILURE_TO_PARSE_TESTS both
			# count as crashes; NO_TESTS adds nothing
			self.crashed += 1
165
class LineStream:
	"""
	A class to represent the lines of kernel output.
	Wraps an iterator of (line number, text) pairs and exposes a
	lazy peek()/pop() interface over it.
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Fetches the next line from the iterator on demand."""
		if self._need_next:
			try:
				self._next = next(self._lines)
			except StopIteration:
				# keep the last line cached; peek() still
				# returns it after EOF
				self._done = True
			finally:
				self._need_next = False

	def peek(self) -> str:
		"""Returns the current line without consuming it."""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Consumes and returns the current line."""
		line = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {line}')
		self._need_next = True
		return line

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Yields and consumes every remaining line in the
		stream.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]
228
# Parsing helper methods:

# Matches the KTAP/TAP version lines that mark the start of KTAP output,
# capturing the protocol version number.
KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
TAP_START = re.compile(r'TAP version ([0-9]+)$')
# Kernel messages indicating that no further KTAP output will follow.
KTAP_END = re.compile('(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')
235
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		prefix_len = 0
		started = False
		for line_num, line in enumerate(kernel_output, start=1):
			line = line.rstrip()  # remove trailing \n
			if not started:
				if KTAP_START.search(line):
					marker = 'KTAP version'
				elif TAP_START.search(line):
					marker = 'TAP version'
				else:
					continue
				# start extracting KTAP lines; the prefix
				# is everything printed before the
				# version string on this line
				prefix_len = len(line.split(marker)[0])
				started = True
				yield line_num, line[prefix_len:]
			elif KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			else:
				# strip the prefix and any indentation and
				# yield the line with its line number
				yield line_num, line[prefix_len:].lstrip()
	return LineStream(lines=isolate_ktap_output(kernel_output))
267
# KTAP and TAP protocol versions this parser accepts without warnings.
KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]
270
def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds an error to the test object if the version number is
	higher or lower than every accepted version.

	Parameters:
	version_num - the version number from the parsed KTAP or TAP
		header line
	accepted_versions - list of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(f'{version_type} version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(f'{version_type} version higher than expected!')
291
def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses a KTAP/TAP header line and checks its version number.
	Returns False if the current line is not a KTAP/TAP header.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	line = lines.peek()
	match = KTAP_START.match(line)
	if match:
		check_version(int(match.group(1)), KTAP_VERSIONS, 'KTAP',
			test)
	else:
		match = TAP_START.match(line)
		if not match:
			return False
		check_version(int(match.group(1)), TAP_VERSIONS, 'TAP',
			test)
	test.log.append(lines.pop())
	return True
320
# Matches a subtest header line, capturing the subtest name.
TEST_HEADER = re.compile(r'^# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses a subtest header line and stores the test name in the
	test object. Returns False if the current line is not a subtest
	header.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if match is None:
		return False
	test.log.append(lines.pop())
	test.name = match.group(1)
	return True
344
# Matches a test plan line, capturing the expected number of subtests.
TEST_PLAN = re.compile(r'1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses a test plan line and stores the expected number of
	subtests in the test object. Sets expected_count to None and
	returns False when there is no valid test plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if match is None:
		test.expected_count = None
		return False
	test.log.append(lines.pop())
	test.expected_count = int(match.group(1))
	return True
372
# Matches a test result line; group 4 is the test name. Note that for
# lines carrying a '# ...' directive, group 4 keeps the space before
# the '#'.
TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

# Matches a test result line with a SKIP directive; group 4 is the test
# name without the directive (and without the space before the '#').
TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches the current line against the format of a test result
	line and checks if the name matches the name of the current
	test.
	Returns False if it fails to match the format or the name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if matched a test result line with a name matching the
		expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	# Extract the name the same way parse_test_result() does: for
	# SKIP lines, TEST_RESULT's group 4 would retain the trailing
	# space before '#', so the comparison could never succeed.
	skip_match = TEST_RESULT_SKIP.match(line)
	if skip_match:
		return skip_match.group(4) == test.name
	return match.group(4) == test.name
401
def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses a test result line, storing the status and name in the
	test object. Reports an error if the test number does not
	match the expected test number.
	Returns False if the current line is not a test result line.

	Note that the SKIP directive is the only directive that causes
	a change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		# current line is not a test result line
		return False
	test.log.append(lines.pop())

	# Prefer the SKIP form of the name, which excludes the
	# directive text
	skip_match = TEST_RESULT_SKIP.match(line)
	test.name = (skip_match or match).group(4)

	# Verify the test number matches the expected one
	num = int(match.group(2))
	if num != expected_num:
		test.add_error('Expected test number ' +
			str(expected_num) + ' but found ' + str(num))

	# Derive the status: a SKIP directive overrides ok/not ok
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif match.group(1) == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True
455
def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Collects lines that match neither the test result line format
	nor the test header format and returns them as a list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log: List[str] = []
	while lines:
		line = lines.peek()
		if TEST_RESULT.match(line) or TEST_HEADER.match(line):
			break
		log.append(lines.pop())
	return log
477
# Matches the diagnostic line printed when a kunit test case crashes.
DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')

def parse_crash_in_log(test: Test) -> bool:
	"""
	Searches the log of the test for a crash message. When one is
	found, sets the test status to crashed and returns True;
	otherwise returns False.

	Parameters:
	test - Test object for current test being parsed

	Return:
	True if crash message found in log
	"""
	if not any(DIAGNOSTIC_CRASH_MESSAGE.match(line)
			for line in test.log):
		return False
	test.status = TestStatus.TEST_CRASHED
	return True
497
498
# Printing helper methods:

DIVIDER = '=' * 60

# ANSI escape sequence that resets all text attributes.
RESET = '\033[0;0m'

def red(text: str) -> str:
	"""Wraps text in a red ANSI color code when stdout is a tty."""
	if sys.stdout.isatty():
		return '\033[1;31m' + text + RESET
	return text

def yellow(text: str) -> str:
	"""Wraps text in a yellow ANSI color code when stdout is a tty."""
	if sys.stdout.isatty():
		return '\033[1;33m' + text + RESET
	return text

def green(text: str) -> str:
	"""Wraps text in a green ANSI color code when stdout is a tty."""
	if sys.stdout.isatty():
		return '\033[1;32m' + text + RESET
	return text

# Number of characters the color codes add around a string (0 when
# stdout is not a tty); used to size colored text inside dividers.
ANSI_LEN = len(red(''))
524
def print_with_timestamp(message: str) -> None:
	"""Prints the message prefixed with the current wall-clock time."""
	timestamp = datetime.datetime.now().strftime('%H:%M:%S')
	print(f'[{timestamp}] {message}')
528
def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns the message centered in a fixed-width divider line.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed such that
		any characters of the color codes are not counted

	Return:
	String containing message centered in fixed width divider
	"""
	# the 2 accounts for the spaces surrounding the message
	remaining = len(DIVIDER) - len_message - 2
	if remaining > 0:
		left = remaining // 2
		right = remaining - left
	else:
		# message too long to center: fall back to a minimal
		# 3-character border on each side
		left = right = 3
	return f"{'=' * left} {message} {'=' * right}"
553
def print_test_header(test: Test) -> None:
	"""
	Prints a test header divider with the test name and, when
	known, the expected number of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	count = test.expected_count
	if count:
		plural = '' if count == 1 else 's'
		message += f' ({count} subtest{plural})'
	print_with_timestamp(format_test_divider(message, len(message)))
574
def print_log(log: Iterable[str]) -> None:
	"""
	Prints every line saved in the test's log in yellow.

	Parameters:
	log - Iterable object with all strings saved in log for test
	"""
	for entry in log:
		print_with_timestamp(yellow(entry))
584
def format_test_result(test: Test) -> str:
	"""
	Returns a formatted result line: a colored status tag followed
	by the test name. As a side effect, dumps the test log for
	crashed or failed tests.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return green('[PASSED] ') + test.name
	if test.status == TestStatus.SKIPPED:
		return yellow('[SKIPPED] ') + test.name
	if test.status == TestStatus.NO_TESTS:
		return yellow('[NO TESTS RUN] ') + test.name
	# for crashes and failures, print the log to help debugging
	print_log(test.log)
	if test.status == TestStatus.TEST_CRASHED:
		return red('[CRASHED] ') + test.name
	return red('[FAILED] ') + test.name
611
def print_test_result(test: Test) -> None:
	"""
	Prints the result line showing the status of the test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	result = format_test_result(test)
	print_with_timestamp(result)
623
def print_test_footer(test: Test) -> None:
	"""
	Prints a divider-style footer containing the status of the
	test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	result = format_test_result(test)
	# subtract ANSI_LEN so color escape codes do not count toward
	# the centered message width
	print_with_timestamp(
		format_test_divider(result, len(result) - ANSI_LEN))
637
def print_summary_line(test: Test) -> None:
	"""
	Prints the summary line of the test object, colored by status:
	green if the test passed, yellow if it was skipped or ran no
	tests, and red if it failed or crashed. The line contains the
	status counts of the test's subtests (or of the test itself if
	it has no subtests).

	Example:
	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
	Errors: 0"

	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = yellow
	else:
		color = red
	print_with_timestamp(color(f'Testing complete. {test.counts}'))
660
def print_error(error_message: str) -> None:
	"""
	Prints the message in the error format.

	Example:
	"[ERROR] Test example: missing test plan!"

	Parameters:
	error_message - message describing error
	"""
	prefix = red('[ERROR] ')
	print_with_timestamp(prefix + error_message)
672
# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	Propagates subtest results into the parent test: accumulates
	the counts of all subtests and, if any subtest crashed, marks
	the parent test as crashed too. A test without subtests instead
	adds its own status to its counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	# a crash message in the log overrides the parsed status
	parse_crash_in_log(test)
	for subtest in test.subtests:
		test.counts.add_subtest_counts(subtest.counts)
	if test.counts.total() == 0:
		# no subtests were counted: this is a single test case
		test.counts.add_status(test.status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED
695
def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
	"""
	Finds next test to parse in LineStream, creates new Test object,
	parses any subtests of the test, populates Test object with all
	information (status, name) about the test and the Test objects for
	any subtests, and then returns the Test object. The method accepts
	three formats of tests:

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header line

	Example:

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	# carry over diagnostic lines seen before this test started
	test.log.extend(log)
	parent_test = False
	main = parse_ktap_header(lines, test)
	if main:
		# If the main KTAP/TAP header is found, attempt to parse
		# the test plan
		test.name = "main"
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If KTAP/TAP header is not found, the test must be a
		# subtest header or a test result line, so attempt to
		# parse a subtest header
		parent_test = parse_test_header(lines, test)
		if parent_test:
			# If subtest header is found, attempt to parse
			# test plan and print header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing expected number of tests or
		# if expected number of tests is unknown break when test
		# result line with matching name to subtest header is found
		# or no more lines in stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				not main):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				# end of subtests: keep the remaining
				# diagnostic lines with the parent test
				test.log.extend(sub_log)
				break
		else:
			# recursively parse the subtest (and its subtests)
			sub_test = parse_test(lines, test_num, sub_log)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if not main:
		# If not main test, look for this test's own result line
		test.log.extend(parse_diagnostic(lines))
		if (parent_test and peek_test_name_match(lines, test)) or \
				not parent_test:
			parse_test_result(lines, test, expected_num)
		else:
			test.add_error('missing subtest result line!')

	# Check for there being no tests
	if parent_test and len(subtests) == 0:
		test.status = TestStatus.NO_TESTS
		test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and not main:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif not main:
		print_test_result(test)
	return test
811
def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Extracts KTAP lines from the kernel output, parses them for
	test results, and prints condensed results and a summary line.

	Parameters:
	kernel_output - Iterable object contains lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	print_with_timestamp(DIVIDER)
	ktap_lines = extract_tap_lines(kernel_output)
	main_test = Test()
	if not ktap_lines:
		# no KTAP output was found in the kernel log
		main_test.add_error('invalid KTAP input!')
		main_test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		main_test = parse_test(ktap_lines, 0, [])
		if main_test.status != TestStatus.NO_TESTS:
			main_test.status = main_test.counts.get_status()
	print_with_timestamp(DIVIDER)
	print_summary_line(main_test)
	return main_test
836