/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Common arm64 stack unwinder code.
 *
 * To implement a new arm64 stack unwinder:
 *	1) Include this header.
 *
 *	2) Provide implementations for the following functions:
 *	     on_overflow_stack():   Returns true if SP is on the overflow
 *				    stack.
 *	     on_accessible_stack(): Returns true if SP is on any accessible
 *				    stack.
 *	     unwind_next():         Performs validation checks on the frame
 *				    pointer, and transitions unwind_state
 *				    to the next frame.
 *
 * See: arch/arm64/include/asm/stacktrace.h for reference
 * implementations.
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_COMMON_H
#define __ASM_STACKTRACE_COMMON_H

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kprobes.h>
#include <linux/types.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	STACK_TYPE_HYP,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and to which it is
 *               no longer valid to unwind.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic
 *               value of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at a strictly increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 *
 * @task:        The task being unwound.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	struct task_struct *task;
};

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

static inline void unwind_init_common(struct unwind_state *state,
				      struct task_struct *task)
{
	state->task = task;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_next() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	state->prev_fp = 0;
	state->prev_type = STACK_TYPE_UNKNOWN;
}
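/*
 * Example: a minimal on_accessible_stack() implementation built from
 * on_stack(), per step 2) of the header comment above. This is an
 * illustrative sketch only, not part of this header:
 * example_task_stack_low()/example_task_stack_high() are hypothetical
 * helpers standing in for the real per-stack bound accessors (see
 * arch/arm64/include/asm/stacktrace.h for the reference implementations).
 *
 *	static bool example_on_accessible_stack(const struct task_struct *tsk,
 *						unsigned long sp,
 *						unsigned long size,
 *						struct stack_info *info)
 *	{
 *		if (info)
 *			info->type = STACK_TYPE_UNKNOWN;
 *
 *		// Check each stack the unwinder may encounter; a real
 *		// implementation would also check the IRQ, overflow and
 *		// SDEI stacks here.
 *		return on_stack(sp, size, example_task_stack_low(tsk),
 *				example_task_stack_high(tsk),
 *				STACK_TYPE_TASK, info);
 *	}
 */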
/*
 * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
 * a kernel address.
 *
 * @fp:   the frame pointer to be updated to its kernel address.
 * @type: the stack type associated with frame pointer @fp
 *
 * Returns true on success, with @fp updated to the corresponding kernel
 * virtual address; otherwise returns false.
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
					    enum stack_type type);

/*
 * on_accessible_stack_fn() - Check whether a stack range is on any of the
 * possible stacks.
 *
 * @tsk:  task whose stack is being unwound
 * @sp:   stack address being checked
 * @size: size of the stack range being checked
 * @info: stack unwinding context
 */
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info);

static inline int unwind_next_common(struct unwind_state *state,
				     struct stack_info *info,
				     on_accessible_stack_fn accessible,
				     stack_trace_translate_fp_fn translate_fp)
{
	unsigned long fp = state->fp, kern_fp = fp;
	struct task_struct *tsk = state->task;

	if (fp & 0x7)
		return -EINVAL;

	if (!accessible(tsk, fp, 16, info))
		return -EINVAL;

	if (test_bit(info->type, state->stacks_done))
		return -EINVAL;

	/*
	 * If fp is not from the current address space, perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp, info->type))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 *	TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 *	TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *	HYP -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info->type == state->prev_type) {
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		__set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next() invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
	state->prev_fp = fp;
	state->prev_type = info->type;

	return 0;
}

#endif /* __ASM_STACKTRACE_COMMON_H */
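/*
 * Example usage: a minimal unwind loop built on unwind_init_common() and
 * unwind_next_common(). This is an illustrative sketch only:
 * example_on_accessible_stack() is the hypothetical callback sketched
 * earlier, consume_entry() stands in for whatever the unwinder does with
 * each unwound PC, and a real unwinder would first seed state->fp and
 * state->pc (e.g. from a struct pt_regs or the caller's own frame record)
 * before looping. Passing NULL for translate_fp is valid when all frame
 * pointers are already kernel addresses, as unwind_next_common() only calls
 * the hook when one is provided.
 *
 *	static void example_unwind(struct unwind_state *state)
 *	{
 *		struct stack_info info;
 *
 *		while (consume_entry(state->pc)) {
 *			// unwind_next_common() returns 0 on success, or
 *			// -EINVAL once a frame record fails validation
 *			// (misaligned fp, inaccessible stack, or an invalid
 *			// stack transition).
 *			if (unwind_next_common(state, &info,
 *					       example_on_accessible_stack,
 *					       NULL))
 *				break;
 *		}
 *	}
 */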