1##
2# Copyright (c) 2023 Apple Inc. All rights reserved.
3#
4# @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5#
6# This file contains Original Code and/or Modifications of Original Code
7# as defined in and that are subject to the Apple Public Source License
8# Version 2.0 (the 'License'). You may not use this file except in
9# compliance with the License. The rights granted to you under the License
10# may not be used to create, or enable the creation or redistribution of,
11# unlawful or unlicensed copies of an Apple operating system, or to
12# circumvent, violate, or enable the circumvention or violation of, any
13# terms of an Apple operating system software license agreement.
14#
15# Please obtain a copy of the License at
16# http://www.opensource.apple.com/apsl/ and read it before using this file.
17#
18# The Original Code and all software distributed under the License are
19# distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20# EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21# INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22# FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23# Please see the License for the specific language governing rights and
24# limitations under the License.
25#
26# @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27##
28
29""" LLDB unit test runner
30
    This module implements the main test runner and result reporting.
32"""
33
34import json
35import unittest
36import unittest.case
37import unittest.result
38
39from enum import Enum
40from collections import namedtuple
41from traceback import TracebackException, format_exception
42from textwrap import TextWrapper, indent
43
44from lldbtest.testcase import LLDBTestCase
45from lldbtest.coverage import cov_start, cov_stop
46
47def _format_exc(exc):
48    """ Format detailed exception for debugging purposes. """
49
50    out_str = ""
51
52    textwrap = TextWrapper(width=100, placeholder="...", max_lines=3)
53    tbexc = TracebackException.from_exception(exc[1], limit=None,
54                                              lookup_lines=True, capture_locals=True)
55
56    for frame in tbexc.stack:
57        out_str += f"File \"{frame.filename}\"  @{frame.lineno} in {frame.name}\n"
58        out_str += "  Locals:\n"
59        for name, value in frame.locals.items():
60            variable = f"    {name} = "
61            first = True
62            for wline in textwrap.wrap(str(value)):
63                if first:
64                    out_str += variable + wline + "\n"
65                    first = False
66                else:
67                    out_str += " " * (len(name) + 7) + wline + "\n"
68
69        out_str += "  " + "-" * 100 + "\n"
70        with open(frame.filename, "r", encoding='utf-8') as src:
71            lines = src.readlines()
72            startline = frame.lineno - 3 if frame.lineno > 2 else 0
73            for lineno in range(startline, frame.lineno + 2):
74
75                marker = '>' if (lineno + 1) == frame.lineno else ' '
76                out_str += f"  {marker} {(lineno + 1):5}  {lines[lineno].rstrip()}\n"
77        out_str += "  " + "-" * 100 + "\n"
78        out_str += "\n"
79
80    return out_str
81
82
83#
84# text based output
85#
86
87
class LLDBTextTestResult(unittest.TextTestResult):
    """ Custom result instance that also records code coverage and other statistics. """

    def __init__(self, stream, descriptions, verbosity, debug = False):
        """ Same as unittest.TextTestResult; debug enables detailed
            (locals-capturing) error tracebacks from _format_exc. """
        super().__init__(stream, descriptions, verbosity)

        self._debug_exception = debug
        # Coverage handle of the currently running test (set in startTest);
        # initialized here so stopTest cannot hit an unset attribute.
        self._cov = None

    def addError(self, test, err):
        """ Record a test that raised an unexpected exception.

            err is an exc_info tuple. The detailed debug format captures
            frame locals; the default format is a standard traceback.
        """

        if self._debug_exception:
            self.errors.append((test, _format_exc(err)))
        else:
            self.errors.append(
                (test, format_exception(err[0], err[1], err[2]))
            )

        # The custom bookkeeping above bypasses super().addError(), which is
        # what normally emits the progress marks - replicate them here so
        # errors show up in the progress output just like failures do.
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()

    def startTest(self, test) -> None:
        """ Begin per-test code coverage collection. """
        self._cov = cov_start()
        return super().startTest(test)

    def stopTest(self, test) -> None:
        """ End per-test code coverage collection. """
        cov_stop(self._cov)
        return super().stopTest(test)
112
113
class LLDBTextTestRunner(unittest.TextTestRunner):
    """ Test runner designed to run unit tests inside LLDB instance. """

    def __init__(self, stream = None, descriptions = True, verbosity = 1,
                 failfast = False, buffer = False, resultclass = None, warnings = None,
                 *, tb_locals = False, debug = False) -> None:
        """ Same arguments as unittest.TextTestRunner, plus debug which
            enables detailed (locals-capturing) error tracebacks. """
        super().__init__(stream, descriptions, verbosity, failfast, buffer, resultclass,
                         warnings, tb_locals=tb_locals)
        self._debug = debug

    def _makeResult(self) -> 'LLDBTextTestResult':
        """ Construct the result object, forwarding the debug flag. """
        return LLDBTextTestResult(self.stream, self.descriptions, self.verbosity,
                                  self._debug)

    def _printTestDescription(self, state, test):
        """ display test details """

        self.stream.writeln()

        # LLDB test cases carry extra metadata (component, description);
        # other test objects only get their str() representation.
        if isinstance(test, LLDBTestCase):
            self.stream.writeln(f' {state}: {test.id()} ({test.COMPONENT})')
        else:
            self.stream.writeln(f' {state}: {test}')
            self.stream.writeln()
            return

        self.stream.writeln()
        self.stream.writeln(' Description:')

        if doc := test.getDescription():
            self.stream.writeln()
            self.stream.writelines(indent(doc, "    "))
            self.stream.writeln()

        self.stream.writeln()

    def printFailureDetails(self, result: 'LLDBTextTestResult') -> None:
        """ display failures """

        self.stream.writeln()

        for test, failure in result.failures:
            self.stream.writeln()
            self.stream.writeln('=' * 100)
            self._printTestDescription("FAILED", test)
            self.stream.writeln('-' * 100)
            self.stream.writeln()
            self.stream.writelines(failure)
            self.stream.writeln('=' * 100)

        self.stream.writeln()

    def printErrorDetails(self, result):
        """ display error details """

        self.stream.writeln()

        for test, error in result.errors:
            self.stream.writeln()
            self.stream.writeln('=' * 100)
            self._printTestDescription("ERROR", test)
            self.stream.writeln('-' * 100)
            self.stream.writeln()
            self.stream.writelines(error)
            self.stream.writeln('=' * 100)

        self.stream.writeln()

    # NOTE: the misspelled name ("Overal") is kept for API compatibility.
    def printOveralResults(self, result):
        """ Print overal summary of results. """

        self.stream.writeln()
        self.stream.writeln('-' * 100)
        self.stream.writeln(f'  Tests total:   {result.testsRun:5}')
        self.stream.writeln(f'  Tests failed:  {len(result.failures):5}')
        self.stream.writeln(f'  Tests skipped: {len(result.skipped):5}')
        self.stream.writeln(f'  Test errors:   {len(result.errors):5}')
        self.stream.writeln('-' * 100)

    def printSkippedDetails(self, result):
        """ Print summary of skipped tests and reasons. """

        self.stream.writeln()
        self.stream.writeln('=' * 100)
        for test, reason in result.skipped:
            self.stream.writeln(f' SKIPPED {test.id()} - {reason}')
        self.stream.writeln('=' * 100)

    def run(self, test):
        """ Run a test case / suite, print failure, error and skip details
            followed by an overall summary, and return the result object. """
        result = self._makeResult()

        # Propagate runner configuration to the result - this custom run()
        # does not go through TextTestRunner.run(), which is where these
        # would normally be applied, so without this the failfast / buffer /
        # tb_locals constructor arguments were silently ignored.
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals

        # Run a test case / test suite
        result.startTestRun()
        try:
            test(result)
        finally:
            result.stopTestRun()

        # Display failures
        if result.failures:
            self.printFailureDetails(result)

        # Display exceptions
        if result.errors:
            self.printErrorDetails(result)

        # Display skipped tests
        if result.skipped:
            self.printSkippedDetails(result)

        # Print summary
        self.printOveralResults(result)

        return result
228
229
230#
231# JSON file based output
232#
233
234
class LLDBTestResult(unittest.TestResult):
    """ Holds results of all tests encode as Result tuple for later processing. """

    # Tuple holding result of every test ran.
    Result = namedtuple('Result', ['test', 'result', 'detail'])

    # Enum holding result type.
    # NOTE: because of the str mixin the member values are the strings
    # '0' / '1' / '2', and that is what appears in the JSON report.
    class ResultCode(str, Enum):
        PASS = 0
        SKIP = 1
        ERROR = 2

    def __init__(self, stream = None, descriptions = None, verbosity = None):
        super().__init__(stream, descriptions, verbosity)

        # Coverage handle of the currently running test (set in startTest).
        self._cov = None
        # List of Result tuples, one entry per test ran.
        self.tests = []

    def addError(self, test, err):
        """ Record a test that raised an unexpected exception. """

        # Format the (expensive) detailed traceback once and reuse it for
        # both the errors list and the Result record.
        exc = _format_exc(err)
        self.errors.append((test, exc))

        self.tests.append(LLDBTestResult.Result(
            test, LLDBTestResult.ResultCode.ERROR, exc
        ))

    def addExpectedFailure(self, test, err):
        """ Record an expected failure. """

        # Store a formatted traceback rather than the raw exc_info tuple -
        # the tuple contains exception / traceback objects that are not
        # JSON serializable and would break the JSON report writer.
        exc = format_exception(err[0], err[1], err[2])
        self.tests.append(LLDBTestResult.Result(
            test, LLDBTestResult.ResultCode.ERROR, exc
        ))
        return super().addExpectedFailure(test, err)

    def addSkip(self, test, reason):
        """ Record a skipped test together with the reason. """
        self.tests.append(LLDBTestResult.Result(
            test, LLDBTestResult.ResultCode.SKIP, reason
        ))
        return super().addSkip(test, reason)

    def addFailure(self, test, err):
        """ Record a failed test. """
        # This path is most of the time taken by failed assertions. There is no
        # point in providing detailed backtraces for assert failures.
        exc = format_exception(err[0], err[1], err[2])

        self.tests.append(LLDBTestResult.Result(
            test, LLDBTestResult.ResultCode.ERROR, exc
        ))
        return super().addFailure(test, err)

    def addSuccess(self, test):
        """ Record a passed test. """
        self.tests.append(LLDBTestResult.Result(
             test, LLDBTestResult.ResultCode.PASS, None
             ))
        return super().addSuccess(test)

    def addUnexpectedSuccess(self, test):
        """ Record a test that passed although it was expected to fail. """
        self.tests.append(LLDBTestResult.Result(
            test, LLDBTestResult.ResultCode.PASS, None
        ))
        return super().addUnexpectedSuccess(test)

    def startTest(self, test) -> None:
        """ Begin per-test code coverage collection. """
        self._cov = cov_start()
        return super().startTest(test)

    def stopTest(self, test) -> None:
        """ End per-test code coverage collection. """
        cov_stop(self._cov)
        return super().stopTest(test)
303
304
class LLDBJSONTestRunner(unittest.TextTestRunner):
    """ Produces JSON report of the test run. """

    def __init__(self, stream = None, descriptions = True, verbosity = 1,
                 failfast = False, buffer = False, resultclass = None, warnings = None,
                 *, tb_locals = False, debug = False) -> None:
        """ Same arguments as unittest.TextTestRunner; debug is accepted for
            interface parity with LLDBTextTestRunner but is currently unused
            here. """
        super().__init__(stream, descriptions, verbosity, failfast, buffer, resultclass,
                         warnings, tb_locals=tb_locals)
        self._debug = debug

    def _makeResult(self) -> 'LLDBTestResult':
        """ Construct the record-keeping result object. """
        return LLDBTestResult(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        """ Run a test case / suite, write a JSON report to the stream and
            return the result object. """
        result = self._makeResult()

        # Propagate runner configuration to the result - this custom run()
        # does not go through TextTestRunner.run(), so without this the
        # failfast / buffer / tb_locals arguments were silently ignored.
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals

        # Run a test case / test suite
        result.startTestRun()
        try:
            test(result)
        finally:
            result.stopTestRun()

        # Build JSON-serializable report entries.
        test_results = []

        for res in result.tests:
            # LLDB test cases carry a human readable description.
            if isinstance(res.test, LLDBTestCase):
                test_results.append({
                    "id": res.test.id(),
                    "desc": res.test.getDescription(),
                    "result": res.result,
                    "detail": res.detail
                })
            else:
                test_results.append({
                    "id": str(res.test),
                    "desc": "",
                    "result": res.result,
                    "detail": res.detail
                })

        json.dump(test_results, self.stream)

        return result
350