xref: /linux-6.15/kernel/trace/trace_stack.c (revision a2d76290)
/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

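/*
 * Dump the currently recorded maximum stack trace to the console.
 * Called from check_stack() when the end of the task's stack has been
 * overwritten (stack overflow), just before BUG()ing.
 */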
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

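/*
 * check_stack() is called by the function tracer callback with the ip of
 * the traced function and a pointer to a variable in the callback's own
 * stack frame.  It computes how much of the thread stack is in use and,
 * if that exceeds the recorded maximum, saves a new stack trace and, for
 * each entry, records the stack depth at which its return address was
 * found by scanning the stack between the current frame and the top.
 */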
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason not be found on the stack, so we may
	 * have to account for that. If they are all there, this
	 * loop will only happen once. This code only runs when
	 * a new max is hit, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[x] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	max_stack_trace.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

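/*
 * stack_trace_call() is the ftrace callback, run on entry to every traced
 * function.  The per-CPU trace_active counter prevents recursion (and lets
 * other holders of max_stack_lock keep the callback out).  MCOUNT_INSN_SIZE
 * is added to the ip so that it matches the corresponding entry in the
 * saved stack trace, which check_stack() uses to skip the tracer's own
 * frames; the address of the local 'stack' variable marks the current
 * stack position.
 */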
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, this variable is only modified by this CPU */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

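/*
 * Handlers for the 'stack_max_size' file in the tracing directory.
 * Reading returns the largest stack usage recorded so far; writing resets
 * (or seeds) it.  The write path bumps the per-CPU trace_active counter so
 * the stack tracer cannot fire and try to take max_stack_lock while it is
 * already held here.
 */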
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If the stack tracer fired inside arch_spin_lock() below, or from
	 * an NMI while the lock is held, it would try to take
	 * max_stack_lock again and deadlock, so we also need to increase
	 * the per-CPU trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

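/*
 * seq_file iterator for the 'stack_trace' file: walk the entries of the
 * recorded maximum stack trace.  max_stack_lock is held (and trace_active
 * raised) from t_start() to t_stop(), so the trace cannot change while it
 * is being printed.
 */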
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

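/*
 * The 'stack_trace_filter' file limits which functions the stack tracer
 * checks.  It reuses the standard ftrace filter infrastructure, attached
 * to this tracer's trace_ops.
 */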
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

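/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled.  When the
 * value changes, register or unregister the ftrace callback, turning the
 * stack tracer on or off at run time.
 */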
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

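/*
 * Boot-time setup: booting with "stacktrace" on the kernel command line
 * enables the stack tracer before user space is up.  The
 * "stacktrace_filter=<function-list>" form additionally stashes a filter
 * string so stack_trace_init() can hand it to ftrace_set_early_filter().
 */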
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

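/*
 * Create the stack tracer's control files in the tracing directory, apply
 * any boot-time filter, and register the ftrace callback if the tracer was
 * enabled on the command line.
 */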
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
470