xref: /linux-6.15/arch/x86/kernel/stacktrace.c (revision 1959a601)
1 /*
2  * Stack trace management functions
3  *
4  *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <[email protected]>
5  */
6 #include <linux/sched.h>
7 #include <linux/stacktrace.h>
8 #include <linux/export.h>
9 #include <linux/uaccess.h>
10 #include <asm/stacktrace.h>
11 
/*
 * dump_trace() ->stack callback, invoked when the unwinder crosses a
 * stack boundary.  We only collect addresses, so ignore the event;
 * returning 0 tells the walker to keep going.
 */
static int save_stack_stack(void *data, const char *name)
{
	return 0;
}
16 
17 static int
18 __save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
19 {
20 	struct stack_trace *trace = data;
21 #ifdef CONFIG_FRAME_POINTER
22 	if (!reliable)
23 		return 0;
24 #endif
25 	if (nosched && in_sched_functions(addr))
26 		return 0;
27 	if (trace->skip > 0) {
28 		trace->skip--;
29 		return 0;
30 	}
31 	if (trace->nr_entries < trace->max_entries) {
32 		trace->entries[trace->nr_entries++] = addr;
33 		return 0;
34 	} else {
35 		return -1; /* no more room, stop walking the stack */
36 	}
37 }
38 
/* dump_trace() ->address callback: record every address, sched included. */
static int save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}
43 
/* dump_trace() ->address callback: skip scheduler-internal frames. */
static int
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}
49 
/* Unwinder callbacks for a full trace (scheduler frames included). */
static const struct stacktrace_ops save_stack_ops = {
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};
55 
/* Unwinder callbacks that filter out scheduler-internal frames. */
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};
61 
62 /*
63  * Save stack-backtrace addresses into a stack_trace buffer.
64  */
65 void save_stack_trace(struct stack_trace *trace)
66 {
67 	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
68 	if (trace->nr_entries < trace->max_entries)
69 		trace->entries[trace->nr_entries++] = ULONG_MAX;
70 }
71 EXPORT_SYMBOL_GPL(save_stack_trace);
72 
73 void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
74 {
75 	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
76 	if (trace->nr_entries < trace->max_entries)
77 		trace->entries[trace->nr_entries++] = ULONG_MAX;
78 }
79 
80 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
81 {
82 	if (!try_get_task_stack(tsk))
83 		return;
84 
85 	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
86 	if (trace->nr_entries < trace->max_entries)
87 		trace->entries[trace->nr_entries++] = ULONG_MAX;
88 
89 	put_task_stack(tsk);
90 }
91 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
92 
93 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
94 
/* In-memory layout of a user-space stack frame header. */
struct stack_frame_user {
	const void __user	*next_fp;	/* caller's saved frame pointer */
	unsigned long		ret_addr;	/* return address of this frame */
};
99 
100 static int
101 copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
102 {
103 	int ret;
104 
105 	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
106 		return 0;
107 
108 	ret = 1;
109 	pagefault_disable();
110 	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
111 		ret = 0;
112 	pagefault_enable();
113 
114 	return ret;
115 }
116 
/*
 * Walk the user-mode stack of the current task by chasing saved frame
 * pointers, recording the return address of each frame.  The walk stops
 * on a faulting frame read, a frame pointer below the user stack
 * pointer, a self-referencing frame, or a full buffer.
 */
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	/* The user-space instruction pointer is the first entry. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		/* Frame pointers must point above the stack pointer. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		/* A self-referencing frame would loop forever — bail out. */
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}
143 
144 void save_stack_trace_user(struct stack_trace *trace)
145 {
146 	/*
147 	 * Trace user stack if we are not a kernel thread
148 	 */
149 	if (current->mm) {
150 		__save_stack_trace_user(trace);
151 	}
152 	if (trace->nr_entries < trace->max_entries)
153 		trace->entries[trace->nr_entries++] = ULONG_MAX;
154 }
155 
156