xref: /linux-6.15/kernel/trace/trace_stack.c (revision 3d9a8072)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
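
/*
 * The stack tracer: hooks every traced function entry via ftrace and
 * records the deepest kernel stack usage seen so far, together with
 * the call chain that produced it.
 *
 * A typical session, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# cat /sys/kernel/tracing/stack_max_size
 *	# cat /sys/kernel/tracing/stack_trace
 */
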
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= &stack_dump_trace[0],
};

static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

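/*
 * Dump the recorded max-stack table straight to the console. Only
 * used from check_stack() when stack-end corruption is detected,
 * just before BUG()ing.
 */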
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (i + 1 == stack_trace_max.nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

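/*
 * Measure the stack usage at this call site: the distance from the
 * current stack position to the top of the thread stack, minus the
 * tracer's own frame. On a new maximum, take stack_trace_max_lock,
 * save a fresh stack trace, and rebuild stack_trace_index[] so that
 * each saved entry is paired with its depth on the stack.
 */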
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set tracer_frame after we read it above */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 0;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not include the passed-in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where on the stack each of these entries sits.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may for some reason
	 * be missing from the stack, so we have to account for that.
	 * If they are all present, this loop runs only once. This code
	 * only runs on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know
			 * that this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer's own stack in
				 * the max stack. If we haven't figured
				 * out what that is yet, figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

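/*
 * The ftrace callback, invoked on entry to every traced function.
 * The per-cpu disable_stack_tracer counter acts as a recursion
 * guard: only the outermost invocation on a CPU checks the stack.
 */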
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* No atomics needed; this variable is only modified on this CPU */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If RCU is not watching, saving the stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

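/* Hooks stack_trace_call() into the function tracer */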
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

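/*
 * tracefs "stack_max_size": reads return the largest stack usage
 * recorded so far; writes store a new value (typically 0, to re-arm
 * the tracer) under the same lock that check_stack() takes.
 */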
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we were traced inside arch_spin_lock() or after it (NMI),
	 * we would deadlock on the lock, so the per-cpu
	 * disable_stack_tracer counter must also be incremented here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

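/*
 * seq_file iterator for the tracefs "stack_trace" file. Position 0
 * yields SEQ_START_TOKEN (the table header); position n maps to
 * entry n - 1 of the saved trace, with the index passed along via
 * m->private. t_start()/t_stop() hold stack_trace_max_lock with the
 * stack tracer disabled on this CPU, so the table cannot change in
 * the middle of a dump.
 */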
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_max.nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries)
		return 0;

	if (i + 1 == stack_trace_max.nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

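/*
 * tracefs "stack_trace_filter": restricts which functions the stack
 * tracer checks, reusing the standard ftrace filter machinery.
 */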
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

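/*
 * Handler for the kernel.stack_tracer_enabled sysctl: registers or
 * unregisters trace_ops whenever the value actually changes.
 */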
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

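/*
 * Boot-time setup. Because __setup() matches by prefix, "stacktrace"
 * here catches both "stacktrace" and "stacktrace_filter=<functions>"
 * on the kernel command line; in the latter case str points at the
 * "_filter=" suffix, and the function list is stashed away for
 * stack_trace_init() to apply.
 */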
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

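/*
 * Create the tracefs control files, apply any boot-time filter, and
 * start tracing if "stacktrace" was given on the command line.
 */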
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);