// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void unwind_init_from_regs(struct unwind_state *state,
						  struct pt_regs *regs)
{
	unwind_init_common(state, current);

	state->fp = regs->regs[29];
	state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);

	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void unwind_init_from_task(struct unwind_state *state,
						  struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}

/*
 * Recover the original return address where state->pc currently points at a
 * trampoline (the ftrace graph return trampoline or the kretprobe
 * trampoline), so the trace reports the real caller.
 */
static __always_inline int
unwind_recover_return_address(struct unwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;

		orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc)) {
		state->pc = kretprobe_find_ret_addr(state->task,
						    (void *)state->fp,
						    &state->kr_cur);
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
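 *
 * On arm64 a frame record is a pair of words at the location the frame
 * pointer (x29) points to: the saved caller fp at offset 0 and the saved lr
 * at offset 8, so following the chain steps from callee to caller.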
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(state);
	if (err)
		return err;

	state->pc = ptrauth_strip_insn_pac(state->pc);

	return unwind_recover_return_address(state);
}
NOKPROBE_SYMBOL(unwind_next);

static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	if (unwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
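
/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * collect return addresses via arch_stack_walk(). The callback type matches
 * stack_trace_consume_fn; all "example_" names are hypothetical and exist
 * only for this illustration, and the block is guarded out so it is never
 * compiled.
 */
#if 0
struct example_trace {
	unsigned long entries[16];
	unsigned int nr;
};

/* Record each PC; returning false stops the unwind early. */
static bool example_consume_entry(void *cookie, unsigned long pc)
{
	struct example_trace *trace = cookie;

	if (trace->nr >= ARRAY_SIZE(trace->entries))
		return false;

	trace->entries[trace->nr++] = pc;
	return true;
}

/*
 * With NULL regs and the current task, arch_stack_walk() unwinds from its
 * caller, i.e. the trace starts at this function.
 */
static void example_save_current_trace(struct example_trace *trace)
{
	trace->nr = 0;
	arch_stack_walk(example_consume_entry, trace, current, NULL);
}
#endif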