/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

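/*
 * One step of an unwind: the frame pointer and return address recovered
 * from a frame record. With CONFIG_FUNCTION_GRAPH_TRACER, @graph is the
 * unwinder's index into the fgraph return stack, used to translate
 * return_to_handler back to the original return address.
 */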
struct stackframe {
	unsigned long fp;
	unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};

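/*
 * The kinds of stack an arm64 sp value may resolve to. A stack_info
 * describes one such stack: its type and the half-open address range
 * [low, high) it occupies.
 */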
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
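
/*
 * Illustrative example (not part of this header): walk_stackframe()
 * invokes @fn once per frame until @fn returns non-zero or the unwind
 * can make no further progress. The frame must first be initialised
 * with start_backtrace():
 *
 *	static int print_entry(struct stackframe *frame, void *data)
 *	{
 *		pr_info(" %pS\n", (void *)frame->pc);
 *		return 0;
 *	}
 *
 *	struct stackframe frame;
 *
 *	start_backtrace(&frame,
 *			(unsigned long)__builtin_frame_address(0),
 *			(unsigned long)this_function);
 *	walk_stackframe(current, &frame, print_entry, NULL);
 *
 * "this_function" is a placeholder for the walking function's own
 * address, used as the initial pc.
 */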

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

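/*
 * Each on_<stack>_stack() helper below returns true if @sp lies within
 * that stack's [low, high) range and, when @info is non-NULL, fills it
 * in with the stack's bounds and type.
 */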
static inline bool on_irq_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_IRQ;
	}

	return true;
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_OVERFLOW;
	}

	return true;
}
#else
static inline bool on_overflow_stack(unsigned long sp,
			struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, info))
		return true;
	if (on_overflow_stack(sp, info))
		return true;
	if (on_sdei_stack(sp, info))
		return true;

	return false;
}
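
/*
 * Illustrative example (not part of this header), mirroring what
 * unwind_frame() does: check that a candidate frame pointer lies on a
 * stack @tsk may legitimately use before dereferencing the frame record:
 *
 *	if (!on_accessible_stack(tsk, frame->fp, NULL))
 *		return -EINVAL;
 *
 * Passing a non-NULL stack_info additionally reports which stack was hit.
 */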

static inline void start_backtrace(struct stackframe *frame,
				   unsigned long fp, unsigned long pc)
{
	frame->fp = fp;
	frame->pc = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame->graph = 0;
#endif
}
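
/*
 * Illustrative example (not part of this header): starting an unwind.
 * For the current task, begin from the caller's own frame; for a blocked
 * task, begin from the context saved at its last context switch:
 *
 *	struct stackframe frame;
 *
 *	if (tsk == current)
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)this_function);
 *	else
 *		start_backtrace(&frame, thread_saved_fp(tsk),
 *				thread_saved_pc(tsk));
 *
 * "this_function" is again a placeholder for the caller's own address.
 */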

#endif	/* __ASM_STACKTRACE_H */