xref: /linux-6.15/arch/arm64/kernel/stacktrace.c (revision bdf8eafb)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

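/*
 * Each unwind is seeded from one of the sources below; dump_backtrace_entry()
 * uses this to annotate trace entries which were not recovered from a frame
 * record:
 *
 *   KUNWIND_SOURCE_FRAME   - recovered from a frame record on a stack
 *   KUNWIND_SOURCE_CALLER  - seeded from the caller of the unwind entry point
 *   KUNWIND_SOURCE_TASK    - seeded from a blocked task's saved fp/pc
 *   KUNWIND_SOURCE_REGS_PC - seeded from the PC of a pt_regs
 */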
enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
};

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 * @source:      The source from which the current unwind state was initialized.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}
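
/*
 * regs->regs[29] is x29, the AArch64 frame pointer register; the unwind
 * starts at regs->pc and follows the chain of frame records from there.
 */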

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}
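
/*
 * __builtin_frame_address(1) and __builtin_return_address(0) describe the
 * frame and return address of whichever function this helper is inlined
 * into. That function must be noinline (as arch_stack_walk() below is) so
 * that it has a frame record of its own; were it inlined, the builtins would
 * describe a more distant caller and the trace would start in the wrong
 * place.
 */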

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}
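
/*
 * thread_saved_fp() and thread_saved_pc() read the fp/pc that cpu_switch_to()
 * saved into the task's cpu_context when the task last scheduled out.
 */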

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
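
/*
 * Both the function graph tracer and kretprobes replace return addresses on
 * the stack with the address of a trampoline. The lookups above map such a
 * trampoline address back to the original return address, so the trace shows
 * the real caller rather than return_to_handler or the kretprobe trampoline.
 */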

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)&task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = unwind_next_frame_record(&state->common);
		if (err)
			return err;
		state->source = KUNWIND_SOURCE_FRAME;
		break;
	default:
		return -EINVAL;
	}

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
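
/*
 * Summary of a single step: stop at the synthetic final frame record embedded
 * in the task's pt_regs, otherwise follow the current frame record to the
 * next one, strip any pointer authentication bits from the recovered PC, and
 * undo any ftrace/kretprobe return-address rewriting.
 */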

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}
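
/*
 * The initial PC may itself be a trampoline address (for example if the
 * unwind was seeded from a pt_regs taken inside return_to_handler), so it is
 * recovered once up front. After that, each state is handed to
 * consume_state() until the callback returns false or no further frame can
 * be unwound.
 */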

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
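
/*
 * stackinfo_get_unknown() yields an empty stack range, so a stack which
 * cannot be in use in the current context is simply never matched when the
 * unwinder validates a candidate fp. These macros rely on the 'task'
 * variable in kunwind_stack_walk() below.
 */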

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}
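
/*
 * Seeding policy: a pt_regs is only accepted for the current task (the regs
 * must live on a stack the caller owns); unwinding current without regs
 * starts from the caller of the noinline entry point; any other task is
 * expected to be blocked and is unwound from its saved context.
 */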

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
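
/*
 * arch_stack_walk() is the arch backend used by the generic stacktrace
 * helpers (e.g. stack_trace_save() in kernel/stacktrace.c). Illustrative use
 * with a hypothetical callback:
 *
 *	static bool print_pc(void *cookie, unsigned long pc)
 *	{
 *		pr_info("%pS\n", (void *)pc);
 *		return true;
 *	}
 *
 *	arch_stack_walk(print_pc, NULL, current, NULL);
 */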

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}
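
/*
 * The BPF walker reports ip and fp for each frame but passes 0 for sp, as
 * the frame-record unwinder does not track the stack pointer. It always
 * unwinds the current task, starting from the caller of
 * arch_bpf_stack_walk().
 */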

static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	char *loglvl = arg;
	printk("%s %pSb%s%s%s\n", loglvl,
		(void *)state->common.pc,
		source ? " (" : "",
		source ? source : "",
		source ? ")" : "");
	return true;
}
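
/*
 * Entries recovered from a frame record are printed bare; anything else gets
 * a source suffix, e.g. " foo+0x10/0x1c (P)" for an entry taken from the PC
 * of a pt_regs.
 */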

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The layout of a userspace frame record in AArch64 mode.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
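
/*
 * This mirrors the AAPCS64 frame record: the frame pointer (x29) points at a
 * saved {fp, lr} pair, and successive records live at increasingly higher
 * addresses as the walk moves towards the outermost caller.
 */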

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
	       stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}
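
/*
 * The copy is made with page faults disabled because the walk may run from
 * contexts where faulting in user memory is not permitted (e.g. the perf
 * callchain path); an inaccessible frame simply ends the walk. The lr value
 * is stripped of any userspace pointer authentication bits before being
 * reported.
 */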

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
				stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */


void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
					const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}
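
/*
 * The user-mode walk starts at regs->pc and then follows frame records from
 * the frame pointer register, stopping when a frame pointer is NULL,
 * insufficiently aligned (8 bytes for AArch64, 4 bytes for AArch32 compat),
 * or rejected by unwind_user_frame()/unwind_compat_user_frame().
 */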
488