1#!/usr/bin/python3
2
3# format_vm_parameter_validation.py
4# Pretty-print the output of tests/vm/vm_parameter_validation.c
5#
6# usage:
7#     vm_parameter_validation | format_vm_parameter_validation.py
8
9import re
10import sys
11import copy
12import itertools
13
# magic return values used for in-band signalling
# (other_return_values below maps these to human-readable legend text)
# fixme duplicated in vm_parameter_validation.c
# fixme also duplicated in other_return_values below
RESULT_SUCCESS  = 0
RESULT_BUSTED   = -99   # trial broken, not performed
RESULT_IGNORED  = -98   # trial ignored, not performed
RESULT_ZEROSIZE = -97   # size == 0
RESULT_PANIC    = -96   # trial is believed to panic, not performed
RESULT_GUARD    = -95   # trial is believed to throw EXC_GUARD, not performed
RESULT_MISMATCH = -94
RESULT_OUT_PARAM_BAD = -93  # trial set incorrect values to out parameters
# Some Mach errors use their normal integer values,
# but we handle them specially here because those
# integers are too long to fit in the grid output.
RESULT_MACH_SEND_INVALID_MEMORY = 0x1000000c
RESULT_MACH_SEND_INVALID_DEST = 0x10000003
30
# output formatting: three-character grid symbols for well-known return values
format_result = {
    RESULT_SUCCESS:       '  .',
    RESULT_BUSTED:        ' **',
    RESULT_MISMATCH:      ' ##',
    RESULT_IGNORED:       '   ',
    RESULT_ZEROSIZE:      '  o',
    RESULT_PANIC:         ' pp',
    RESULT_GUARD:         ' gg',
    RESULT_OUT_PARAM_BAD: ' ot',
    RESULT_MACH_SEND_INVALID_MEMORY: ' mi',
    RESULT_MACH_SEND_INVALID_DEST:   ' md',
}

# same as format_result, but for functions
# where 0=failure and 1=success
# (note RESULT_SUCCESS == 0, so the 0 entry here overrides it)
format_bool_result = {
    **format_result,
    0: '  x',
    1: format_result[RESULT_SUCCESS],
}
52
def formatter_for_testname(testname):
    # Boolean-returning functions get the 0=failure/1=success symbols;
    # everything else gets the default table.
    uses_bool_codes = error_code_values_for_testname(testname) == bool_return_values
    return format_bool_result if uses_bool_codes else format_result
57
format_default = '%3d'   # fallback for return codes with no symbol in the table
format_col_width = 3     # width of one grid column
format_empty_col = format_col_width * ' '  # NOTE(review): appears unused in this file -- confirm
format_indent_width = 4
format_indent = format_indent_width * ' '  # leading indent for all grid output
63
64
class Result:
    """The result of one trial.

    ret: the return value from the tested function
    parameters: list of the input parameter names for that trial
        (for example ["start PGSZ-2", "size -1"])
    """
    def __init__(self, new_ret, new_parameters):
        self.ret = new_ret
        self.parameters = new_parameters

    def __repr__(self):
        return f"{self.ret} = {self.parameters}"
75
# record the results of all trials in one test
# testname: the name of the test (including the function being tested)
# config: a string describing OS, CPU, etc
# compat: code for error compatibility
# results: an array of Result, one per trial
class Test:
    def __init__(self, new_name, new_config, new_compat, new_results = None):
        self.testname = new_name
        self.config = new_config
        self.compat = new_compat
        # Default to None instead of [] so each Test gets its own fresh
        # results list (a mutable default argument is shared across all
        # calls and instances would otherwise alias one list).
        self.results = [] if new_results is None else new_results
87
# print column labels under some output
# example output given indent=2 col_width=4 labels=[foo,bar,baz,qux]:
#  |   |   |   |
#  |   |   |   qux
#  |   |   baz
#  |   bar
#  foo
def print_column_labels(labels, indent_width, col_width):
    pad = indent_width * ' '
    # one column marker: a pipe padded out to the column width
    marker = '|'.ljust(col_width)

    # first, a marker over every column
    remaining = len(labels)
    print(pad + remaining * marker)

    # then one line per label, from the rightmost column inward,
    # preceded by markers for the still-unlabeled columns to its left
    for text in reversed(labels):
        remaining -= 1
        print(pad + remaining * marker + text)
105
# pretty-print one function return code (no trailing newline)
def print_one_result(ret, formatter):
    # use the formatter's short symbol when the code is known,
    # otherwise fall back to the raw numeric value
    try:
        text = formatter[ret]
    except KeyError:
        text = format_default % (ret)
    print(text, end='')
112
# choose the appropriate error code table for a test
# (either errno_return_values, bool_return_values, or kern_return_values)
def error_code_values_for_testname(testname):
    # str.startswith accepts a tuple: True if any prefix matches
    errno_prefixes = ('mprotect', 'msync', 'minherit', 'mincore', 'mlock',
                      'munlock', 'mmap', 'munmap', 'mremap_encrypted',
                      'vslock', 'vsunlock', 'madvise')
    bool_prefixes = ('useracc', 'task_find_region_details')
    if testname.startswith(errno_prefixes):
        return errno_return_values
    if testname.startswith(bool_prefixes):
        return bool_return_values
    return kern_return_values
127
# print a helpful description of the return values seen in results
# fixme these won't include RESULT_MISMATCH
def print_legend(test):
    # collect the distinct return codes present in the results
    seen_codes = {result.ret for result in test.results}

    known_return_values = error_code_values_for_testname(test.testname)

    # describe each detected code; unknown nonzero codes get '????',
    # and success (0) is omitted when not in the known table
    descriptions = []
    for code in sorted(seen_codes):
        if code in known_return_values:
            descriptions.append(known_return_values[code])
        elif code in other_return_values:
            descriptions.append(other_return_values[code])
        elif code != 0:
            descriptions.append(str(code) + ': ????')

    print(format_indent + '(' + ', '.join(descriptions) + ')')
149
# display names for error codes returned in errno
# (each value is rendered as "<code>: <name>", e.g. "22: EINVAL")
errno_return_values = {
    code: f'{code}: {name}'
    for code, name in {
        1: 'EPERM',
        9: 'EBADF',
        12: 'ENOMEM',
        13: 'EACCES',
        14: 'EFAULT',
        22: 'EINVAL',
        45: 'ENOTSUP',
    }.items()
}
162
# display names for error codes returned in kern_return_t
# (the loop below rewrites each value in place to "<code>: <name>")
kern_return_values = {
    1: 'KERN_INVALID_ADDRESS',
    2: 'KERN_PROTECTION_FAILURE',
    3: 'KERN_NO_SPACE',
    4: 'KERN_INVALID_ARGUMENT',
    5: 'KERN_FAILURE',
    6: 'KERN_RESOURCE_SHORTAGE',
    7: 'KERN_NOT_RECEIVER',
    8: 'KERN_NO_ACCESS',
    9: 'KERN_MEMORY_FAILURE',
    10: 'KERN_MEMORY_ERROR',
    11: 'KERN_ALREADY_IN_SET',
    12: 'KERN_NOT_IN_SET',
    13: 'KERN_NAME_EXISTS',
    14: 'KERN_ABORTED',
    15: 'KERN_INVALID_NAME',
    16: 'KERN_INVALID_TASK',
    17: 'KERN_INVALID_RIGHT',
    18: 'KERN_INVALID_VALUE',
    19: 'KERN_UREFS_OVERFLOW',
    20: 'KERN_INVALID_CAPABILITY',
    21: 'KERN_RIGHT_EXISTS',
    22: 'KERN_INVALID_HOST',
    23: 'KERN_MEMORY_PRESENT',
    24: 'KERN_MEMORY_DATA_MOVED',
    25: 'KERN_MEMORY_RESTART_COPY',
    26: 'KERN_INVALID_PROCESSOR_SET',
    27: 'KERN_POLICY_LIMIT',
    28: 'KERN_INVALID_POLICY',
    29: 'KERN_INVALID_OBJECT',
    30: 'KERN_ALREADY_WAITING',
    31: 'KERN_DEFAULT_SET',
    32: 'KERN_EXCEPTION_PROTECTED',
    33: 'KERN_INVALID_LEDGER',
    34: 'KERN_INVALID_MEMORY_CONTROL',
    35: 'KERN_INVALID_SECURITY',
    36: 'KERN_NOT_DEPRESSED',
    37: 'KERN_TERMINATED',
    38: 'KERN_LOCK_SET_DESTROYED',
    39: 'KERN_LOCK_UNSTABLE',
    40: 'KERN_LOCK_OWNED',
    41: 'KERN_LOCK_OWNED_SELF',
    42: 'KERN_SEMAPHORE_DESTROYED',
    43: 'KERN_RPC_SERVER_TERMINATED',
    44: 'KERN_RPC_TERMINATE_ORPHAN',
    45: 'KERN_RPC_CONTINUE_ORPHAN',
    46: 'KERN_NOT_SUPPORTED',
    47: 'KERN_NODE_DOWN',
    48: 'KERN_NOT_WAITING',
    49: 'KERN_OPERATION_TIMED_OUT',
    50: 'KERN_CODESIGN_ERROR',
    51: 'KERN_POLICY_STATIC',
    52: 'KERN_INSUFFICIENT_BUFFER_SIZE',
    53: 'KERN_DENIED',
    54: 'KERN_MISSING_KC',
    55: 'KERN_INVALID_KC',
    56: 'KERN_NOT_FOUND',
    100: 'KERN_RETURN_MAX',
    -304: 'MIG_BAD_ARGUMENTS (server type check failure)',
    # MACH_SEND_INVALID_MEMORY and other Mach errors with large integer values
    # are not handled here. They use format_result and other_return_values instead.
}
# rewrite values in place; safe while iterating because only values change
for k, v in kern_return_values.items():
    kern_return_values[k] = str(k) + ': ' + v
228
# display names for error codes returned by a boolean function
# where 0=failure and 1=success
# (reuses the grid symbols from format_bool_result, stripped of padding)
bool_return_values = {
    code: format_bool_result[code].lstrip() + ': ' + label
    for code, label in ((0, 'false/failure'), (1, 'true/success'))
}
235
# display names for the special return values used by the test machinery
# (keys are the RESULT_* sentinels; each description reuses the grid symbol
# from format_result, stripped of its leading padding)
other_return_values = {
    RESULT_BUSTED:   format_result[RESULT_BUSTED].lstrip() + ': trial broken, not performed',
    RESULT_IGNORED:  '<empty> trial ignored, not performed',
    RESULT_ZEROSIZE: format_result[RESULT_ZEROSIZE].lstrip() + ': size == 0',
    RESULT_PANIC:    format_result[RESULT_PANIC].lstrip() + ': trial is believed to panic, not performed',
    RESULT_GUARD:    format_result[RESULT_GUARD].lstrip() + ': trial is believed to throw EXC_GUARD, not performed',
    RESULT_OUT_PARAM_BAD: format_result[RESULT_OUT_PARAM_BAD].lstrip() + ': trial set incorrect values to out parameters',
    RESULT_MACH_SEND_INVALID_MEMORY: format_result[RESULT_MACH_SEND_INVALID_MEMORY].lstrip() + ': MACH_SEND_INVALID_MEMORY',
    RESULT_MACH_SEND_INVALID_DEST:   format_result[RESULT_MACH_SEND_INVALID_DEST].lstrip() + ': MACH_SEND_INVALID_DEST',
}
247
# inside line, replace 'return 123' with 'return ERR_CODE_NAME'
def replace_error_code_return(test, line):
    known = error_code_values_for_testname(test.testname)
    for code, name in known.items():
        line = line.replace(f'return {code};', f'return {name};')
    return line
254
def dimensions(results):
    # number of parameters per trial, taken from the first result
    # (0 when there are no results at all)
    return len(results[0].parameters) if results else 0
259
# given one k-dimensional results
# return a list of k counts that is the size of each dimension
def count_each_dimension(results):
    if not results:
        return []
    reference = results[0].parameters
    counts = []
    stride = 1
    # Work from the innermost (fastest-varying) dimension outward.
    # A dimension's size is found at the first index (stepping by the
    # current stride) where its parameter repeats results[0]'s value.
    for axis in range(len(reference) - 1, -1, -1):
        size = round(len(results) / stride)
        for index in range(0, len(results), stride):
            if index != 0 and results[index].parameters[axis] == reference[axis]:
                size = round(index / stride)
                break
        stride *= size
        counts.append(size)

    counts.reverse()
    return counts
281
# Reduce one k-dimensional results to many (k-1) dimensional results
# Yields a sequence of [results, name] pairs
# where results has k-1 dimensions
# and name is the parameter name from the removed dimension
def iterate_dimension(results, dim = 0):
    if len(results) == 0:
        return

    k = dimensions(results)
    dim_counts = count_each_dimension(results)

    # number of consecutive results sharing one value of dimension `dim`
    # (product of the sizes of all faster-varying dimensions)
    inner_count = 1
    for d in range(dim+1, k):
        inner_count *= dim_counts[d]

    # distance between successive repetitions of the same `dim` value
    # (len(results) divided by the sizes of all slower-varying dimensions)
    outer_step = len(results)
    for d in range(0, dim):
        outer_step = int(outer_step / dim_counts[d])

    # for each distinct value of dimension `dim`, gather every result
    # holding that value and strip the dimension from its parameter list
    for r in range(dim_counts[dim]):
        start = r * inner_count
        name = results[start].parameters[dim]
        new_results = []
        for i in range(start, len(results), outer_step):
            for j in range(inner_count):
                # deep copy so deleting the parameter does not mutate the input
                new_result = copy.deepcopy(results[i+j])
                del new_result.parameters[dim]
                new_results.append(new_result)
        yield [new_results, name]
311
# Print the results of a test that has two parameters (for example a test of start/size)
# If overrides!=None, use any non-SUCCESS return values from overrides in place of the other results.
def print_results_2D(results, formatter, overrides=None):
    # Validate overrides' shape. On mismatch, warn and actually ignore
    # them (previously the code warned "overrides ignored" but kept
    # using them, and a shorter overrides list raised IndexError inside
    # the validation loop itself).
    if overrides:
        mismatched = False
        if len(overrides) != len(results):
            print("WARNING: override results have a different height; overrides ignored")
            mismatched = True
        else:
            for i, result in enumerate(results):
                if len(overrides[i].parameters) != len(result.parameters):
                    print("WARNING: override results have a different width; overrides ignored")
                    mismatched = True
        if mismatched:
            overrides = None

    columns = []
    prev_row_label = ''
    first_row_label = ''
    for i, result in enumerate(results):
        if first_row_label == '':
            # record first row's name so we can use it to find columns
            # (assumes every row has the same column labels)
            first_row_label = result.parameters[0]

        if result.parameters[0] == first_row_label:
            # record column names in the first row
            columns.append(result.parameters[1])

        if result.parameters[0] != prev_row_label:
            # new row: finish the previous row's label, start a new line
            if prev_row_label != '':
                print(format_indent + prev_row_label)
            print(format_indent, end='')
            prev_row_label = result.parameters[0]

        # a non-SUCCESS override replaces the corresponding result
        ret = result.ret
        if overrides and overrides[i].ret != RESULT_SUCCESS:
            ret = overrides[i].ret
        print_one_result(ret, formatter)

    if prev_row_label:
        print(format_indent + prev_row_label)
    print_column_labels(columns, format_indent_width + format_col_width - 1, format_col_width)
351
# Print a 2D result grid, condensed to one line when every trial
# returned the same value; otherwise fall back to the full grid.
def print_results_2D_try_condensed(results, formatter):
    if not results:
        return
    singleton = results[0].ret
    if any(result.ret != singleton for result in results):
        # at least two distinct return codes: print the full grid
        print_results_2D(results, formatter)
        return
    # Every trial returned `singleton`: print it once.
    # (Removed dead code that collected row/column label sets but never
    # used them, and relied on a leftover loop variable for a value
    # already held in `singleton`.)
    print_one_result(singleton, formatter)
    print(" for all pairs")
367
# Print a 3D result set as a series of 2D tables.
# First slice along parameter 1 (tables of parameters 0 x 2), then along
# parameter 0 (tables of 1 x 2). The second pass is redundant but can be
# useful for human readers.
def print_results_3D(results, formatter, testname):
    for axis in (1, 0):
        for table, label in iterate_dimension(results, axis):
            print(testname + ': ' + label)
            print_results_2D(table, formatter)
379
# Print a 4D result set: one 2D table of the first two parameters for
# each combination of the remaining (3rd, 4th, ...) parameter values.
# Consecutive combinations whose return-code matrices are identical are
# merged and printed once. (This assumes the sets of 2D labels are the
# same everywhere, and doesn't check that assumption.)
def print_results_4D(results, formatter):
    # group results by their trailing parameter values
    # (removed unused x/y/z initialization and a needless enumerate)
    map_of_results = {}
    for result in results:
        key = tuple(result.parameters[2:])
        map_of_results.setdefault(key, []).append(result)

    # build ([keys], 2D-results) groups, merging adjacent identical matrices
    prev_matrix = []
    groups = []
    for key, result_list in map_of_results.items():
        table = []
        matrix = []
        for result in result_list:
            table.append(Result(result.ret, (result.parameters[0], result.parameters[1])))
            matrix.append(result.ret)
        if matrix == prev_matrix:
            # same return codes as the previous table: just record the key
            groups[-1][0].append(key)
        else:
            groups.append(([key], table))
        prev_matrix = matrix

    # print each group (renamed from `iter`, which shadowed the builtin)
    for keys, table in groups:
        print(keys)
        print_results_2D_try_condensed(table, formatter)
418
419
# Print the results of a test that has one parameter
# (for example a test of addr only, or size only)
# If overrides!=None, use any non-SUCCESS return values from overrides in place of the other results.
def print_results_1D(results, formatter, overrides=None):
    # Validate overrides' shape. On mismatch, warn and actually ignore
    # them (previously the code warned "overrides ignored" but kept
    # using them, and a shorter overrides list raised IndexError inside
    # the validation loop itself).
    if overrides:
        mismatched = False
        if len(overrides) != len(results):
            print("WARNING: override results have a different height; overrides ignored")
            mismatched = True
        else:
            for i, result in enumerate(results):
                if len(overrides[i].parameters) != len(result.parameters):
                    print("WARNING: override results have a different width; overrides ignored")
                    mismatched = True
        if mismatched:
            overrides = None

    for i, result in enumerate(results):
        # a non-SUCCESS override replaces the corresponding result
        ret = result.ret
        if overrides and overrides[i].ret != RESULT_SUCCESS:
            ret = overrides[i].ret

        # indent, value, indent, label
        print(format_indent, end='')
        print_one_result(ret, formatter)
        print(format_indent + result.parameters[0])
442
# Dispatch to the printer matching the dimensionality of results.
def print_results_nD(results, testname, overrides=None):
    formatter = formatter_for_testname(testname)
    ndims = dimensions(results)

    if ndims == 1:
        print_results_1D(results, formatter, overrides)
    elif ndims == 2:
        print_results_2D(results, formatter, overrides)
    elif ndims == 3:
        print_results_3D(results, formatter, testname)
    elif ndims == 4:
        print_results_4D(results, formatter)
    else:
        print(format_indent + 'too many dimensions')
456
457
def main():
    """Read vm_parameter_validation output on stdin and pretty-print it."""
    data = sys.stdin.readlines()

    # Remove any lines that don't start with "TESTNAME", "TESTCONFIG",
    # "TESTCOMPAT", or "RESULT" (including darwintest output like "PASS"
    # or "FAIL") and print them now.
    # Also verify that the counts of "TEST BEGIN" == "TEST END"
    # (they will mismatch if a test suite crashed)
    testbegincount = 0
    testendcount = 0
    testlines = []
    for line in data:
        unmodified_line = line
        # count TEST BEGIN and TEST END
        if 'TEST BEGIN' in line:
            testbegincount += 1
        if 'TEST END' in line:
            testendcount += 1
        # remove any T_LOG() timestamp prefixes and KTEST prefixes
        # (raw strings: '\s' and '\d' in ordinary strings are invalid
        # escape sequences and warn on modern Python)
        line = re.sub(r'^\s*\d+:\d+:\d+ ', '', line)
        line = re.sub(r'^\[KTEST\]\s+[A-Z]+\s+\d+\s+(\d+\s+)?\S+\s+\d+\s+', '', line)
        line = line.lstrip()

        if (line.startswith('TESTNAME') or line.startswith('RESULT')
            or line.startswith('TESTCONFIG') or line.startswith('TESTCOMPAT')):
            testlines.append(line)  # line is test output
        elif line == '':
            pass  # ignore empty lines
        else:
            print(unmodified_line, end='')  # line is other output

    # parse test output into Test and Result objects

    # each TESTNAME line starts a new group
    testnum = 0
    def group_by_test(line):
        nonlocal testnum
        if line.startswith('TESTNAME '):
            testnum = testnum + 1
        return testnum

    tests = []
    for _, group in itertools.groupby(testlines, group_by_test):
        lines = list(group)

        # first two lines of each group are TESTNAME then TESTCONFIG
        name = lines.pop(0).removeprefix('TESTNAME ').rstrip()
        config = lines.pop(0).removeprefix('TESTCONFIG ').rstrip()
        # NOTE(review): TESTCOMPAT lines are collected above but never
        # parsed; compat is always empty -- confirm whether intentional.
        compat = []
        results = []
        for line in lines:
            if line.startswith('RESULT'):
                # "RESULT <ret>, <param>, <param>, ..."
                components = line.removeprefix('RESULT ').rstrip().split(', ')
                ret = int(components.pop(0))
                results.append(Result(ret, components))

        tests.append(Test(name, config, compat, results))

    print('found %d tests' % (len(tests)))

    # stats to print at the end
    test_count = len(tests)
    all_configurations = set()

    # print test output
    for test in tests:
        # print test name and test config on separate lines
        # `diff` handles this better than putting both on the same line
        print('test ' + test.testname)

        print(format_indent + 'config ' + test.config)
        all_configurations.add(test.config)

        if len(test.results) == 0:
            print(format_indent + 'no results')
        else:
            print_legend(test)
            print_results_nD(test.results, test.testname)

        print('end  ' + test.testname)

    print()
    print(str(test_count) + ' test(s) performed')

    if testbegincount != testendcount:
        print('### error: %d TEST BEGINs, %d TEST ENDs - some tests may have crashed'
              % (testbegincount, testendcount))

    print(str(len(all_configurations)) + ' configuration(s) tested:')
    for config in sorted(all_configurations):
        print(format_indent + '[' + config + ']')


# guard the entry point so importing this module doesn't consume stdin
if __name__ == '__main__':
    main()
552