xref: /linux-6.15/arch/arm64/kernel/stacktrace.c (revision 1beef60e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}
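
/*
 * Illustrative sketch (not part of the original file): because
 * kunwind_init_from_caller() is __always_inline, __builtin_frame_address(1)
 * and __builtin_return_address(0) are evaluated within whichever noinline
 * function it is inlined into, so the recorded fp/pc describe that
 * function's caller. A hypothetical user would look like this:
 */
#if 0	/* example only */
static noinline void example_unwind_from_here(struct kunwind_state *state)
{
	/*
	 * fp/pc now describe the frame that called
	 * example_unwind_from_here(); if this function were inlined, the
	 * builtins above would instead refer to some frame further up and
	 * the resulting trace would be skewed.
	 */
	kunwind_init_from_caller(state);
}
#endif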

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
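
/*
 * Illustrative note (not part of the original file): with the function graph
 * tracer or a kretprobe active, the return address saved in a traced
 * function's frame record is replaced by return_to_handler or the kretprobe
 * trampoline. Without the recovery above, a trace through such a function
 * (names below are made up) would report the trampoline:
 *
 *	some_traced_function
 *	return_to_handler		<-- not the real caller
 *	...
 *
 * kunwind_recover_return_address() looks up and substitutes the original
 * return address so the real caller is reported instead.
 */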

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
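
/*
 * Illustrative sketch (not part of the original file): per AAPCS64, a frame
 * record is a pair of 64-bit values on the stack, with x29 (fp) pointing at
 * the record for the current frame:
 *
 *	fp -> +--------------------+
 *	      | fp of caller frame |   (record B, followed next)
 *	      +--------------------+
 *	      | saved lr           |   (becomes state->common.pc)
 *	      +--------------------+
 *
 * unwind_next_frame_record() loads this pair to step from record A to
 * record B, checking that B's location lies on a known stack and does not
 * loop back over records already consumed.
 */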

static __always_inline void
do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_entry(cookie, state->common.pc))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
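
/*
 * Illustrative expansion (not part of the original file): in the stacks[]
 * table below, an entry such as STACKINFO_CPU(irq) expands to roughly:
 */
#if 0	/* example only */
	((task == current) && !preemptible())
		? stackinfo_get_irq()		/* this CPU's IRQ stack bounds */
		: stackinfo_get_unknown();	/* empty range; matches nothing */
#endif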

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_entry, cookie);
}
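
/*
 * Illustrative usage (not part of the original file): callers drive
 * arch_stack_walk() with a callback that is invoked once per entry and
 * returns false to stop the walk. A minimal sketch, with hypothetical
 * names, might look like:
 */
#if 0	/* example only */
struct example_trace {
	unsigned long entries[16];
	unsigned int nr;
};

static bool example_consume(void *cookie, unsigned long pc)
{
	struct example_trace *t = cookie;

	if (t->nr >= ARRAY_SIZE(t->entries))
		return false;			/* buffer full: stop walking */
	t->entries[t->nr++] = pc;
	return true;				/* keep walking */
}

static noinline void example_save_current_trace(struct example_trace *t)
{
	t->nr = 0;
	/*
	 * regs == NULL and task == current, so the unwind starts from
	 * arch_stack_walk()'s caller, i.e. this function.
	 */
	arch_stack_walk(example_consume, t, current, NULL);
}
#endif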

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
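
/*
 * Illustrative output (not part of the original file): each entry above is
 * printed with %pSb, i.e. symbol+offset/size (plus the module build ID for
 * module addresses), so a dump looks roughly like the following, with made-up
 * symbols and offsets:
 *
 *	Call trace:
 *	 some_function+0x94/0xec
 *	 its_caller+0x18/0x24
 *	 ...
 */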

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
274