xref: /linux-6.15/arch/powerpc/kernel/stacktrace.c (revision bd79010f)
/*
 * Stack trace utility
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];			/* back chain: the caller's SP */
		ip = stack[STACK_FRAME_LR_SAVE];	/* saved return address */

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}
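
/*
 * Annotation, not part of the original file: the walk above relies on
 * the powerpc stack frame layout, in which every frame begins with a
 * back chain word pointing at the caller's frame, with the LR save
 * slot at a fixed offset from it:
 *
 *	sp[0]			back chain, i.e. the SP of the next
 *				outer frame (read into newsp above)
 *	sp[STACK_FRAME_LR_SAVE]	LR save slot: the return address into
 *				the function owning this frame, stored
 *				there by its callee (16 bytes into the
 *				frame on 64-bit, 4 bytes on 32-bit)
 */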

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
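
/*
 * Illustrative sketch, not part of the original file: one plausible way
 * a caller could capture and print the current task's backtrace with
 * this API. The buffer size and skip count are arbitrary choices, and
 * print_stack_trace() is the generic helper that accompanied struct
 * stack_trace in kernel/stacktrace.c.
 */
#if 0
static void example_show_current_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= 16,
		.skip		= 1,	/* drop this function's own frame */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
#endif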

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
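
/*
 * Annotation, not part of the original file: nothing above pins @tsk or
 * its stack, so a caller must ensure the task cannot run concurrently
 * (for instance because it is known to be blocked). Otherwise the walk
 * races with the task and may read a stale or changing stack.
 */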

void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	/* gpr[1] is r1, the powerpc stack pointer */
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
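
/*
 * Annotation, not part of the original file: this variant starts the
 * walk from a saved register state rather than the live stack pointer,
 * which is what an exception or oops path that was handed a struct
 * pt_regs would want.
 */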

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
				struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;

	/*
	 * The innermost frame, which is the first one unwound, may not
	 * yet have saved its LR onto the stack.
	 */
	int firstframe = 1;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread() and thus, a forked task will have an
		 * unreliable stack trace until it has been _switch()'ed
		 * to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * Idle tasks have a custom stack layout,
		 * cf. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return 1;
	}

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		/* Sanity check: the ABI requires SP to be 16-byte aligned. */
		if (sp & 0xF)
			return 1;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return 1;
		}

		newsp = stack[0];
		/* The stack grows downwards; the unwinder may only go up. */
		if (newsp <= sp)
			return 1;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return 1; /* invalid backlink, too far up. */
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe && !__kernel_text_address(ip))
			return 1;
		firstframe = 0;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);

		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return 1;

		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;

		if (newsp == stack_end)
			break;

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;

		sp = newsp;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
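
/*
 * Illustrative sketch, not part of the original file: how a consumer
 * (livepatch-style stack checking, for example) might interpret the
 * return convention above: 0 means all recorded entries are
 * trustworthy, a positive value marks the stack as unreliable, and
 * -E2BIG means the entry buffer was too small. The buffer size and
 * the -EBUSY mapping are arbitrary choices for the example.
 */
#if 0
static int example_check_task_stack(struct task_struct *tsk)
{
	unsigned long entries[64];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= 64,
	};
	int ret;

	ret = save_stack_trace_tsk_reliable(tsk, &trace);
	if (ret < 0)
		return ret;	/* -E2BIG: retry with a bigger buffer */
	if (ret > 0)
		return -EBUSY;	/* stack cannot be trusted right now */

	/* trace.entries[0..trace.nr_entries-1] are reliable here. */
	return 0;
}
#endif
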
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */