xref: /linux-6.15/kernel/trace/trace_stack.c (revision f7edb451)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

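/*
 * The stack tracer records the deepest kernel stack usage seen since it
 * was enabled (writing to stack_max_size replaces the recorded maximum,
 * effectively re-arming it).  It hooks every traced function via ftrace
 * and, whenever the current stack depth exceeds the recorded maximum,
 * saves the stack trace along with the depth at each frame.
 *
 * Typical usage (paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/tracing/stack_max_size
 *   cat /sys/kernel/tracing/stack_trace
 */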
#define STACK_TRACE_ENTRIES 500

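/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far; stack_trace_index[] holds, for each of those entries,
 * how many bytes of stack were in use at the point where that return
 * address was stored (the "Depth" column of the stack_trace file).
 */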
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

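/*
 * disable_stack_tracer is a per-CPU recursion guard: the ftrace callback
 * bumps it and bails out if it was already non-zero, so functions called
 * while recording a new max are not traced in turn.  stack_tracer_enabled
 * mirrors the kernel.stack_tracer_enabled sysctl.
 */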
DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

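/*
 * Dump the recorded maximum stack usage to the console.  Only called
 * from check_stack() when the task's stack end marker has been
 * overwritten, just before BUG()ing.
 */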
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

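/*
 * check_stack - see if the current stack usage is a new maximum
 * @ip:    address of the traced function, used to skip the stack
 *         tracer's own frames in the saved trace
 * @stack: pointer to a local on the caller's stack, used to compute the
 *         current depth and to scan the stack for saved return addresses
 *
 * If the depth beats stack_trace_max_size, take the arch spinlock,
 * re-check, save a fresh stack trace and record how deep the stack was
 * at each saved return address.
 */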
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					       ARRAY_SIZE(stack_dump_trace) - 1,
					       0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

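/*
 * The ftrace callback, invoked on entry to every traced function.  It
 * guards against recursion with the per-CPU counter, adjusts @ip past
 * the mcount call site so it points at the traced function itself, and
 * hands off to check_stack() with the address of a local variable as
 * the current stack position.
 */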
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

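/*
 * Read/write handlers for the stack_max_size tracefs file.  Reading
 * reports the deepest stack usage recorded so far; writing (e.g. 0)
 * replaces the recorded maximum, which effectively re-arms the tracer.
 */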
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

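/*
 * seq_file iterators for the stack_trace file.  t_start() disables the
 * stack tracer on this CPU and takes the arch spinlock so the recorded
 * trace cannot change while it is being printed; t_stop() undoes both.
 */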
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

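/*
 * Print one entry of the recorded trace.  The Size column is the
 * difference between this entry's depth and the next entry's depth,
 * i.e. the stack consumed by this function alone.
 */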
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

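/*
 * Handler for the kernel.stack_tracer_enabled sysctl.  When a write
 * actually changes the value, register or unregister the ftrace
 * callback accordingly.
 */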
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

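/*
 * "stacktrace" on the kernel command line enables the tracer at boot;
 * "stacktrace_filter=<funcs>" additionally limits it to the listed
 * functions.  __setup() passes in the string after the "stacktrace"
 * prefix, which is why str_has_prefix() looks for "_filter=" here.
 */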
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

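/*
 * Create the tracefs files (stack_max_size, stack_trace and, with
 * dynamic ftrace, stack_trace_filter), apply any boot-time filter, and
 * start tracing right away if "stacktrace" was on the command line.
 */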
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);