xref: /linux-6.15/kernel/trace/trace_stack.c (revision 962e3707)
1e5a81b62SSteven Rostedt /*
2e5a81b62SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
3e5a81b62SSteven Rostedt  *
4e5a81b62SSteven Rostedt  */
5e5a81b62SSteven Rostedt #include <linux/stacktrace.h>
6e5a81b62SSteven Rostedt #include <linux/kallsyms.h>
7e5a81b62SSteven Rostedt #include <linux/seq_file.h>
8e5a81b62SSteven Rostedt #include <linux/spinlock.h>
9e5a81b62SSteven Rostedt #include <linux/uaccess.h>
10e5a81b62SSteven Rostedt #include <linux/ftrace.h>
11e5a81b62SSteven Rostedt #include <linux/module.h>
12f38f1d2aSSteven Rostedt #include <linux/sysctl.h>
13e5a81b62SSteven Rostedt #include <linux/init.h>
14762e1207SSteven Rostedt 
15762e1207SSteven Rostedt #include <asm/setup.h>
16762e1207SSteven Rostedt 
17e5a81b62SSteven Rostedt #include "trace.h"
18e5a81b62SSteven Rostedt 
#define STACK_TRACE_ENTRIES 500

/*
 * With -mfentry the tracer is called before the traced function has
 * set up its stack frame; check_stack() uses this flag to decide
 * which ip to search for on the stack (see stack_trace_call()).
 */
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

/*
 * Snapshot of the deepest stack seen so far.  The trace is terminated
 * by a ULONG_MAX sentinel (hence the +1 slot); stack_dump_index[i]
 * holds the remaining stack size, in bytes, at the point entry i was
 * found on the stack.
 */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

/* Deepest stack usage observed, in bytes (exposed as stack_max_size). */
static unsigned long max_stack_size;
/*
 * Raw arch spinlock: the tracer callback can fire in any context
 * (including while lockdep itself runs), so a plain spinlock with
 * debugging/lockdep hooks cannot be used here.
 */
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-cpu recursion guard: non-zero while this cpu is inside the tracer. */
static DEFINE_PER_CPU(int, trace_active);
/* Serializes enable/disable through the stack_tracer_enabled sysctl. */
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
51e3172181SMinchan Kim static inline void print_max_stack(void)
52e3172181SMinchan Kim {
53e3172181SMinchan Kim 	long i;
54e3172181SMinchan Kim 	int size;
55e3172181SMinchan Kim 
56e3172181SMinchan Kim 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
57e3172181SMinchan Kim 			   "        -----    ----   --------\n",
58e3172181SMinchan Kim 			   max_stack_trace.nr_entries - 1);
59e3172181SMinchan Kim 
60e3172181SMinchan Kim 	for (i = 0; i < max_stack_trace.nr_entries; i++) {
61e3172181SMinchan Kim 		if (stack_dump_trace[i] == ULONG_MAX)
62e3172181SMinchan Kim 			break;
63e3172181SMinchan Kim 		if (i+1 == max_stack_trace.nr_entries ||
64e3172181SMinchan Kim 				stack_dump_trace[i+1] == ULONG_MAX)
65e3172181SMinchan Kim 			size = stack_dump_index[i];
66e3172181SMinchan Kim 		else
67e3172181SMinchan Kim 			size = stack_dump_index[i] - stack_dump_index[i+1];
68e3172181SMinchan Kim 
69e3172181SMinchan Kim 		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
70e3172181SMinchan Kim 				size, (void *)stack_dump_trace[i]);
71e3172181SMinchan Kim 	}
72e3172181SMinchan Kim }
73e3172181SMinchan Kim 
/*
 * Check whether the current stack depth is a new maximum and, if so,
 * record the stack trace and per-entry sizes under max_stack_lock.
 *
 * @ip:    adjusted return address of the traced function (see
 *         stack_trace_call() for how it is chosen)
 * @stack: address of a local in the caller, used as the "current
 *         depth" anchor on this task's stack
 *
 * Called with preemption disabled and recursion already prevented by
 * the caller via the per-cpu trace_active counter.
 */
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	/* Stack cost of the tracer itself; learned once, lazily, below */
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	/* Bytes used so far on this THREAD_SIZE-aligned stack */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	/* Cheap unlocked check: not a new max, nothing to do */
	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

	/* Skip the tracer's own frames from the saved trace */
	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	/* One past the highest word of this task's stack */
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				/* Depth remaining below this entry, in bytes */
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		/* Entry not found on the stack: keep its default index */
		if (!found)
			i++;
	}

	/* The deep stack may have overwritten the end-of-stack marker */
	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
180e5a81b62SSteven Rostedt 
181e5a81b62SSteven Rostedt static void
182a1e2e31dSSteven Rostedt stack_trace_call(unsigned long ip, unsigned long parent_ip,
183a1e2e31dSSteven Rostedt 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
184e5a81b62SSteven Rostedt {
18587889501SSteven Rostedt (Red Hat) 	unsigned long stack;
1865168ae50SSteven Rostedt 	int cpu;
187e5a81b62SSteven Rostedt 
1885168ae50SSteven Rostedt 	preempt_disable_notrace();
189e5a81b62SSteven Rostedt 
190e5a81b62SSteven Rostedt 	cpu = raw_smp_processor_id();
191e5a81b62SSteven Rostedt 	/* no atomic needed, we only modify this variable by this cpu */
192e5a81b62SSteven Rostedt 	if (per_cpu(trace_active, cpu)++ != 0)
193e5a81b62SSteven Rostedt 		goto out;
194e5a81b62SSteven Rostedt 
1954df29712SSteven Rostedt (Red Hat) 	/*
1964df29712SSteven Rostedt (Red Hat) 	 * When fentry is used, the traced function does not get
1974df29712SSteven Rostedt (Red Hat) 	 * its stack frame set up, and we lose the parent.
1984df29712SSteven Rostedt (Red Hat) 	 * The ip is pretty useless because the function tracer
1994df29712SSteven Rostedt (Red Hat) 	 * was called before that function set up its stack frame.
2004df29712SSteven Rostedt (Red Hat) 	 * In this case, we use the parent ip.
2014df29712SSteven Rostedt (Red Hat) 	 *
2024df29712SSteven Rostedt (Red Hat) 	 * By adding the return address of either the parent ip
2034df29712SSteven Rostedt (Red Hat) 	 * or the current ip we can disregard most of the stack usage
2044df29712SSteven Rostedt (Red Hat) 	 * caused by the stack tracer itself.
2054df29712SSteven Rostedt (Red Hat) 	 *
2064df29712SSteven Rostedt (Red Hat) 	 * The function tracer always reports the address of where the
2074df29712SSteven Rostedt (Red Hat) 	 * mcount call was, but the stack will hold the return address.
2084df29712SSteven Rostedt (Red Hat) 	 */
2094df29712SSteven Rostedt (Red Hat) 	if (fentry)
2104df29712SSteven Rostedt (Red Hat) 		ip = parent_ip;
2114df29712SSteven Rostedt (Red Hat) 	else
2124df29712SSteven Rostedt (Red Hat) 		ip += MCOUNT_INSN_SIZE;
2134df29712SSteven Rostedt (Red Hat) 
2144df29712SSteven Rostedt (Red Hat) 	check_stack(ip, &stack);
215e5a81b62SSteven Rostedt 
216e5a81b62SSteven Rostedt  out:
217e5a81b62SSteven Rostedt 	per_cpu(trace_active, cpu)--;
218e5a81b62SSteven Rostedt 	/* prevent recursion in schedule */
2195168ae50SSteven Rostedt 	preempt_enable_notrace();
220e5a81b62SSteven Rostedt }
221e5a81b62SSteven Rostedt 
/* The ftrace_ops registered/unregistered as the tracer is toggled. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	/* stack_trace_call() does its own recursion protection */
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
227e5a81b62SSteven Rostedt 
228e5a81b62SSteven Rostedt static ssize_t
229e5a81b62SSteven Rostedt stack_max_size_read(struct file *filp, char __user *ubuf,
230e5a81b62SSteven Rostedt 		    size_t count, loff_t *ppos)
231e5a81b62SSteven Rostedt {
232e5a81b62SSteven Rostedt 	unsigned long *ptr = filp->private_data;
233e5a81b62SSteven Rostedt 	char buf[64];
234e5a81b62SSteven Rostedt 	int r;
235e5a81b62SSteven Rostedt 
236e5a81b62SSteven Rostedt 	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
237e5a81b62SSteven Rostedt 	if (r > sizeof(buf))
238e5a81b62SSteven Rostedt 		r = sizeof(buf);
239e5a81b62SSteven Rostedt 	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
240e5a81b62SSteven Rostedt }
241e5a81b62SSteven Rostedt 
/*
 * Write handler for the stack_max_size file: parse a decimal value
 * from userspace and store it (normally used to reset the max to 0).
 * Returns @count on success or a negative errno from the parse.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
274e5a81b62SSteven Rostedt 
/* File operations for the stack_max_size tracefs file. */
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
281e5a81b62SSteven Rostedt 
282e5a81b62SSteven Rostedt static void *
2832fc5f0cfSLi Zefan __next(struct seq_file *m, loff_t *pos)
284e5a81b62SSteven Rostedt {
2852fc5f0cfSLi Zefan 	long n = *pos - 1;
286e5a81b62SSteven Rostedt 
2872fc5f0cfSLi Zefan 	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
288e5a81b62SSteven Rostedt 		return NULL;
289e5a81b62SSteven Rostedt 
2902fc5f0cfSLi Zefan 	m->private = (void *)n;
2911b6cced6SSteven Rostedt 	return &m->private;
292e5a81b62SSteven Rostedt }
293e5a81b62SSteven Rostedt 
/* seq_file next: advance the position and resolve it via __next(). */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
3002fc5f0cfSLi Zefan 
/*
 * seq_file start: take max_stack_lock so the trace cannot change
 * while it is being printed.  Undone in t_stop(), in reverse order.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	/*
	 * Bump trace_active so the tracer callback cannot recurse on
	 * max_stack_lock from this cpu while we hold it.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	/* Position 0 is the header line */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
317e5a81b62SSteven Rostedt 
/* seq_file stop: release everything t_start() took, in reverse order. */
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
329e5a81b62SSteven Rostedt 
330*962e3707SJoe Perches static void trace_lookup_stack(struct seq_file *m, long i)
331e5a81b62SSteven Rostedt {
3321b6cced6SSteven Rostedt 	unsigned long addr = stack_dump_trace[i];
333e5a81b62SSteven Rostedt 
334*962e3707SJoe Perches 	seq_printf(m, "%pS\n", (void *)addr);
335e5a81b62SSteven Rostedt }
336e5a81b62SSteven Rostedt 
/* Emit a hint in the stack_trace file when the tracer is off. */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
347e447e1dfSSteven Rostedt 
/*
 * seq_file show: print the header for SEQ_START_TOKEN, otherwise one
 * line per trace entry with its depth, per-frame size, and symbol.
 * Mirrors the size computation in print_max_stack().
 */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	/* __next() stored the entry index in m->private */
	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	/* Last real entry accounts for all remaining depth */
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
383e5a81b62SSteven Rostedt 
/* Iterator for the read-only stack_trace tracefs file. */
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

/* Open handler: attach the seq_file iterator above. */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
402e5a81b62SSteven Rostedt 
/*
 * Open handler for stack_trace_filter: reuses the generic ftrace
 * filter machinery against this tracer's trace_ops.
 */
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
417d2d45c7aSSteven Rostedt 
/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled.  Registers or
 * unregisters the ftrace callback when the value actually changes.
 * Returns 0 on success or the error from proc_dointvec().
 */
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, on error, or if the value is unchanged */
	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
444f38f1d2aSSteven Rostedt 
/* Holds an early "stacktrace_filter=" argument until init time. */
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * Parse the "stacktrace" boot parameter (optionally
 * "stacktrace_filter=<funcs>") and enable the tracer at boot.
 * The buffer is one byte larger than COMMAND_LINE_SIZE and static
 * (zero-filled), so the strncpy() result is always NUL-terminated.
 */
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
457f38f1d2aSSteven Rostedt 
/*
 * Create the tracefs control files and, if enabled on the command
 * line, apply the early filter and register the tracer.
 */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	/* Apply any filter captured by enable_stacktrace() */
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
485