xref: /linux-6.15/arch/arm64/kernel/stacktrace.c (revision f05a4a42)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

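/*
 * The source of the unwinder's current PC value: a frame record, the caller
 * of the unwinder, a blocked task's saved context, or the PC in a pt_regs.
 */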
enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
};

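/*
 * Flags recording how the most recent return address was recovered; they are
 * used to annotate entries in dump_backtrace_entry().
 */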
union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 * @source:      How the current unwind PC was derived (see enum kunwind_source).
 * @flags:       How the most recent return address was recovered.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}

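/*
 * A minimal sketch of such an entry point (hypothetical, for illustration
 * only): it must be noinline so that, once kunwind_init_from_caller() is
 * inlined into it, __builtin_frame_address(1) and __builtin_return_address(0)
 * refer to a real frame record for its caller.
 */
static noinline void __maybe_unused example_kunwind_from_here(void)
{
	struct kunwind_state state;

	kunwind_init_from_caller(&state);
	/* state.common.{fp,pc} now describe this function's caller */
}
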
/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}

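/*
 * The function graph tracer and kretprobes rewrite on-stack return addresses
 * to point at their trampolines (return_to_handler and the kretprobe
 * trampoline respectively). Recover the original return address so that the
 * reported trace reflects the real callers.
 */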
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * Per AAPCS64, a frame record is a pair of 64-bit values: a pointer to the
 * caller's frame record and the saved LR (return address), with x29 pointing
 * at the record.
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	state->flags.all = 0;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)&task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = unwind_next_frame_record(&state->common);
		if (err)
			return err;
		state->source = KUNWIND_SOURCE_FRAME;
		break;
	default:
		return -EINVAL;
	}

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})
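
/*
 * For example, STACKINFO_CPU(irq) evaluates to stackinfo_get_irq() when
 * unwinding the current task in a non-preemptible context, and to
 * stackinfo_get_unknown() otherwise; STACKINFO_SDEI() below follows the same
 * pattern for NMI context.
 */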

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

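	/* Unwinding from regs is only valid for the current task */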
	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
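
/*
 * A minimal sketch of a consumer (hypothetical, for illustration only): a
 * stack_trace_consume_fn which prints each entry and keeps walking. Most
 * callers reach arch_stack_walk() via the generic stacktrace API (e.g.
 * stack_trace_save()) rather than calling it directly.
 */
static bool __maybe_unused example_print_entry(void *cookie, unsigned long pc)
{
	pr_info("  %pS\n", (void *)pc);
	return true;	/* continue unwinding */
}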

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;
	char *loglvl = arg;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
		(void *)state->common.pc,
		has_info ? " (" : "",
		source ? source : "",
		flags.fgraph ? "F" : "",
		flags.kretprobe ? "K" : "",
		has_info ? ")" : "");

	return true;
}
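
/*
 * For illustration, an annotated entry might look like (exact symbols and
 * offsets will differ):
 *
 *   show_stack+0x18/0x30 (C)
 *
 * where "C"/"T"/"P"/"U" identify the unwind source per state_source_string()
 * and "F"/"K" mark return addresses recovered from the function graph tracer
 * or a kretprobe trampoline.
 */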

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
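	/*
	 * The barrier() plausibly prevents the compiler from tail-calling
	 * dump_backtrace(), so this function keeps its own frame while the
	 * trace is produced.
	 */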
	barrier();
}

/*
 * The layout of a userspace frame record, for a task running in AArch64 mode.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
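
/*
 * Per AAPCS64, a frame record is a {fp, lr} pair typically pushed by the
 * function prologue (e.g. "stp x29, x30, [sp, #-N]!"), with x29 pointing at
 * the pair; struct frame_tail above mirrors that in-memory layout.
 */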

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
	       stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Check that the whole frame record is accessible */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
				stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Check that the whole compat frame record is accessible */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
					const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
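		/* Terminate on a NULL or non-8-byte-aligned frame pointer */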
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
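		/* Terminate on a NULL or non-4-byte-aligned frame pointer */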
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}