xref: /linux-6.15/arch/arm64/kernel/stacktrace.c (revision 65ac33be)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
260ffc30dSCatalin Marinas /*
360ffc30dSCatalin Marinas  * Stack tracing support
460ffc30dSCatalin Marinas  *
560ffc30dSCatalin Marinas  * Copyright (C) 2012 ARM Ltd.
660ffc30dSCatalin Marinas  */
760ffc30dSCatalin Marinas #include <linux/kernel.h>
87ea55715SArd Biesheuvel #include <linux/efi.h>
960ffc30dSCatalin Marinas #include <linux/export.h>
10e74cb1b4SPuranjay Mohan #include <linux/filter.h>
1120380bb3SAKASHI Takahiro #include <linux/ftrace.h>
121beef60eSMark Rutland #include <linux/kprobes.h>
1360ffc30dSCatalin Marinas #include <linux/sched.h>
14b17b0153SIngo Molnar #include <linux/sched/debug.h>
1568db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
1660ffc30dSCatalin Marinas #include <linux/stacktrace.h>
1760ffc30dSCatalin Marinas 
187ea55715SArd Biesheuvel #include <asm/efi.h>
19132cd887SAKASHI Takahiro #include <asm/irq.h>
20a9ea0017SMark Rutland #include <asm/stack_pointer.h>
2160ffc30dSCatalin Marinas #include <asm/stacktrace.h>
2260ffc30dSCatalin Marinas 
/*
 * Where the PC of the current unwind record came from; determines how the
 * next step is unwound and is used to annotate dumped backtraces.
 */
enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,		/* not yet initialized */
	KUNWIND_SOURCE_FRAME,		/* regular fp/lr frame record */
	KUNWIND_SOURCE_CALLER,		/* caller of the unwind entry point */
	KUNWIND_SOURCE_TASK,		/* blocked task's saved fp/pc */
	KUNWIND_SOURCE_REGS_PC,		/* PC taken from an in-stack pt_regs */
};
30bdf8eafbSMark Rutland 
/*
 * Per-record annotation flags, cleared at each unwind step; 'all' allows
 * the whole set to be cleared or tested in one access.
 */
union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,	/* PC recovered from return_to_handler */
				kretprobe : 1;	/* PC recovered from kretprobe trampoline */
	};
};
388094df1cSMark Rutland 
/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 * @source:      What the current record's PC was derived from (see
 *               enum kunwind_source).
 * @flags:       Per-record flags noting whether the PC was recovered from a
 *               function-graph or kretprobe return trampoline.
 * @regs:        The pt_regs the current record was derived from, when
 *               applicable; NULL otherwise.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};
601beef60eSMark Rutland 
/*
 * Initialize the fields common to every unwind start.  The fp/pc and the
 * real source are filled in afterwards by the kunwind_init_from_*() helpers.
 */
static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}
711beef60eSMark Rutland 
/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];	/* AAPCS64 frame pointer */
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}
90a019d8a2SMadhavan T. Venkataraman 
/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	/* Level 1: the frame/return address of the (noinline) invoker's caller */
	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}
108a019d8a2SMadhavan T. Venkataraman 
/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked tasks saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}
129b07f3499SMark Brown 
/*
 * If the current PC is a return trampoline installed by the function graph
 * tracer or by kretprobes, replace it with the original return address.
 *
 * Returns 0 on success, or -EINVAL when an fgraph return address cannot be
 * recovered (the unwind cannot continue reliably past that point).
 */
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (state->common.pc == orig_pc) {
			/*
			 * Lookup failure is only tolerable when racily
			 * unwinding another task; it should never happen
			 * for current.
			 */
			WARN_ON_ONCE(state->task == current);
			return -EINVAL;
		}
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
1629e09d445SMark Rutland 
/*
 * Unwind across an exception boundary: the frame record at fp is the one
 * embedded within a pt_regs saved on the stack, so recover the interrupted
 * context's PC and FP from those regs.
 *
 * Returns 0 on success, or -EINVAL if the pt_regs does not fit on a known
 * stack at the expected location.
 */
static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	/* fp points at the frame record embedded within the pt_regs */
	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	/*
	 * NOTE(review): state->regs is set here and cleared again below with
	 * no intervening use visible in this file — presumably a leftover
	 * from an earlier multi-step regs unwind; confirm before relying on
	 * state->regs being populated anywhere.
	 */
	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->regs = NULL;
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}
186c2c6b27bSMark Rutland 
/*
 * Handle a terminal frame record (one whose fp/lr pair is (0, 0)), which is
 * followed by metadata describing why the chain ends there.
 *
 * Returns -ENOENT for a graceful end of unwind (the final record within the
 * task's pt_regs), the result of unwinding across an exception boundary for
 * an in-stack pt_regs record, or -EINVAL for anything unexpected.
 */
static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		/* A misplaced final record is only expected when racily
		 * unwinding another task. */
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	}
}
213c2c6b27bSMark Rutland 
/*
 * Step from the frame record at state->common.fp to its parent record.
 *
 * The record must be 8-byte aligned and lie on a known stack.  A (0, 0)
 * fp/lr pair marks the end of the chain and is delegated to
 * kunwind_next_frame_record_meta().
 */
static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	/* Mark this record's stack region consumed so malformed/cyclic
	 * record chains terminate the unwind. */
	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}
244c2c6b27bSMark Rutland 
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 *
 * On success the new PC has its kernel pointer-auth PAC stripped and any
 * tracer/kretprobe trampoline replaced with the real return address.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	/* Flags describe only the record produced by this step */
	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_frame_record(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
2774e00532fSMarc Zyngier 
/* Per-record callback; returning false stops the unwind. */
typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

/*
 * Walk the stack from the initialized @state, invoking @consume_state with
 * @cookie for each record until it returns false or the unwind terminates.
 */
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	/* The starting PC may itself be a tracer/kretprobe trampoline */
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}
2974e00532fSMarc Zyngier 
/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * The EFI runtime-services stack is only accessible when unwinding the
 * current task while it is executing within EFI.
 */
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
3267ea55715SArd Biesheuvel 
/*
 * Walk the kernel stack(s) of @task, invoking @consume_state with @cookie
 * for each unwound record.
 *
 * If @regs is non-NULL it provides the starting context and @task must be
 * current (otherwise the walk is silently abandoned).  Otherwise the walk
 * starts from this function's caller (for current) or from the blocked
 * task's saved context.
 */
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	/* All stacks an unwind of @task could legitimately traverse */
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}
3651aba06e7SMark Rutland 
/* Adapter pairing a generic stack_trace_consume_fn with its cookie. */
struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

/*
 * Bridge from the kunwind callback interface to the generic stacktrace
 * consume_entry interface, forwarding just the PC.
 */
static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}
3771aba06e7SMark Rutland 
arch_stack_walk(stack_trace_consume_fn consume_entry,void * cookie,struct task_struct * task,struct pt_regs * regs)3781aba06e7SMark Rutland noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
3791aba06e7SMark Rutland 			      void *cookie, struct task_struct *task,
3801aba06e7SMark Rutland 			      struct pt_regs *regs)
3811aba06e7SMark Rutland {
3821aba06e7SMark Rutland 	struct kunwind_consume_entry_data data = {
3831aba06e7SMark Rutland 		.consume_entry = consume_entry,
3841aba06e7SMark Rutland 		.cookie = cookie,
3851aba06e7SMark Rutland 	};
3861aba06e7SMark Rutland 
3871aba06e7SMark Rutland 	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
38860ffc30dSCatalin Marinas }
389ead6122cSMark Rutland 
/* Adapter pairing a BPF unwind callback with its cookie. */
struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

/*
 * Bridge from the kunwind callback interface to the BPF unwind interface.
 * The stack pointer is not tracked by the unwinder, so 0 is reported.
 */
static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}
403e74cb1b4SPuranjay Mohan 
arch_bpf_stack_walk(bool (* consume_entry)(void * cookie,u64 ip,u64 sp,u64 fp),void * cookie)404e74cb1b4SPuranjay Mohan noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
405e74cb1b4SPuranjay Mohan 								u64 fp), void *cookie)
406e74cb1b4SPuranjay Mohan {
407e74cb1b4SPuranjay Mohan 	struct bpf_unwind_consume_entry_data data = {
408e74cb1b4SPuranjay Mohan 		.consume_entry = consume_entry,
409e74cb1b4SPuranjay Mohan 		.cookie = cookie,
410e74cb1b4SPuranjay Mohan 	};
411e74cb1b4SPuranjay Mohan 
412e74cb1b4SPuranjay Mohan 	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
413e74cb1b4SPuranjay Mohan }
414e74cb1b4SPuranjay Mohan 
state_source_string(const struct kunwind_state * state)415bdf8eafbSMark Rutland static const char *state_source_string(const struct kunwind_state *state)
416bdf8eafbSMark Rutland {
417bdf8eafbSMark Rutland 	switch (state->source) {
418bdf8eafbSMark Rutland 	case KUNWIND_SOURCE_FRAME:	return NULL;
419bdf8eafbSMark Rutland 	case KUNWIND_SOURCE_CALLER:	return "C";
420bdf8eafbSMark Rutland 	case KUNWIND_SOURCE_TASK:	return "T";
421bdf8eafbSMark Rutland 	case KUNWIND_SOURCE_REGS_PC:	return "P";
422bdf8eafbSMark Rutland 	default:			return "U";
423bdf8eafbSMark Rutland 	}
424bdf8eafbSMark Rutland }
425bdf8eafbSMark Rutland 
dump_backtrace_entry(const struct kunwind_state * state,void * arg)426b7794795SMark Rutland static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
427ead6122cSMark Rutland {
428bdf8eafbSMark Rutland 	const char *source = state_source_string(state);
4298094df1cSMark Rutland 	union unwind_flags flags = state->flags;
4308094df1cSMark Rutland 	bool has_info = source || flags.all;
431ead6122cSMark Rutland 	char *loglvl = arg;
4328094df1cSMark Rutland 
4338094df1cSMark Rutland 	printk("%s %pSb%s%s%s%s%s\n", loglvl,
434bdf8eafbSMark Rutland 		(void *)state->common.pc,
4358094df1cSMark Rutland 		has_info ? " (" : "",
436bdf8eafbSMark Rutland 		source ? source : "",
4378094df1cSMark Rutland 		flags.fgraph ? "F" : "",
4388094df1cSMark Rutland 		flags.kretprobe ? "K" : "",
4398094df1cSMark Rutland 		has_info ? ")" : "");
4408094df1cSMark Rutland 
441ead6122cSMark Rutland 	return true;
442ead6122cSMark Rutland }
443ead6122cSMark Rutland 
dump_backtrace(struct pt_regs * regs,struct task_struct * tsk,const char * loglvl)444ead6122cSMark Rutland void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
445ead6122cSMark Rutland 		    const char *loglvl)
446ead6122cSMark Rutland {
447ead6122cSMark Rutland 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
448ead6122cSMark Rutland 
449ead6122cSMark Rutland 	if (regs && user_mode(regs))
450ead6122cSMark Rutland 		return;
451ead6122cSMark Rutland 
452ead6122cSMark Rutland 	if (!tsk)
453ead6122cSMark Rutland 		tsk = current;
454ead6122cSMark Rutland 
455ead6122cSMark Rutland 	if (!try_get_task_stack(tsk))
456ead6122cSMark Rutland 		return;
457ead6122cSMark Rutland 
458ead6122cSMark Rutland 	printk("%sCall trace:\n", loglvl);
459b7794795SMark Rutland 	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
460ead6122cSMark Rutland 
461ead6122cSMark Rutland 	put_task_stack(tsk);
462ead6122cSMark Rutland }
463ead6122cSMark Rutland 
show_stack(struct task_struct * tsk,unsigned long * sp,const char * loglvl)464ead6122cSMark Rutland void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
465ead6122cSMark Rutland {
466ead6122cSMark Rutland 	dump_backtrace(NULL, tsk, loglvl);
467ead6122cSMark Rutland 	barrier();
468ead6122cSMark Rutland }
469410e471fSchenqiwu 
/*
 * The struct defined for userspace stack frame in AARCH64 mode.
 *
 * Packed so it mirrors the exact in-memory layout of a user frame record
 * (fp followed by lr) regardless of compiler padding.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
477410e471fSchenqiwu 
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 *
 * Returns NULL (terminating the walk) on inaccessible/faulting user memory,
 * when the consumer declines further entries, or when the chain does not
 * strictly progress towards higher addresses.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
	       stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	/* Copy atomically: this can run from contexts that must not sleep */
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	/* User return addresses may carry a pointer-auth code; strip it */
	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}
515410e471fSchenqiwu 
#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));
530410e471fSchenqiwu 
/*
 * AArch32 (compat) counterpart of unwind_user_frame(): report one frame's
 * lr and return a pointer to the next frame tail, or NULL to stop the walk.
 */
static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
				stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	/* Copy atomically: this can run from contexts that must not sleep */
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */
563410e471fSchenqiwu 
564410e471fSchenqiwu 
arch_stack_walk_user(stack_trace_consume_fn consume_entry,void * cookie,const struct pt_regs * regs)565410e471fSchenqiwu void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
566410e471fSchenqiwu 					const struct pt_regs *regs)
567410e471fSchenqiwu {
568410e471fSchenqiwu 	if (!consume_entry(cookie, regs->pc))
569410e471fSchenqiwu 		return;
570410e471fSchenqiwu 
571410e471fSchenqiwu 	if (!compat_user_mode(regs)) {
572410e471fSchenqiwu 		/* AARCH64 mode */
573410e471fSchenqiwu 		struct frame_tail __user *tail;
574410e471fSchenqiwu 
575410e471fSchenqiwu 		tail = (struct frame_tail __user *)regs->regs[29];
576410e471fSchenqiwu 		while (tail && !((unsigned long)tail & 0x7))
577410e471fSchenqiwu 			tail = unwind_user_frame(tail, cookie, consume_entry);
578410e471fSchenqiwu 	} else {
579410e471fSchenqiwu #ifdef CONFIG_COMPAT
580410e471fSchenqiwu 		/* AARCH32 compat mode */
581410e471fSchenqiwu 		struct compat_frame_tail __user *tail;
582410e471fSchenqiwu 
583410e471fSchenqiwu 		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
584410e471fSchenqiwu 		while (tail && !((unsigned long)tail & 0x3))
585410e471fSchenqiwu 			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
586410e471fSchenqiwu #endif
587410e471fSchenqiwu 	}
588410e471fSchenqiwu }
589