xref: /linux-6.15/kernel/trace/trace_stack.c (revision 7eea4fce)
1e5a81b62SSteven Rostedt /*
2e5a81b62SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
3e5a81b62SSteven Rostedt  *
4e5a81b62SSteven Rostedt  */
5e5a81b62SSteven Rostedt #include <linux/stacktrace.h>
6e5a81b62SSteven Rostedt #include <linux/kallsyms.h>
7e5a81b62SSteven Rostedt #include <linux/seq_file.h>
8e5a81b62SSteven Rostedt #include <linux/spinlock.h>
9e5a81b62SSteven Rostedt #include <linux/uaccess.h>
10e5a81b62SSteven Rostedt #include <linux/debugfs.h>
11e5a81b62SSteven Rostedt #include <linux/ftrace.h>
12e5a81b62SSteven Rostedt #include <linux/module.h>
13f38f1d2aSSteven Rostedt #include <linux/sysctl.h>
14e5a81b62SSteven Rostedt #include <linux/init.h>
15e5a81b62SSteven Rostedt #include <linux/fs.h>
1638628078SAaron Tomlin #include <linux/magic.h>
17762e1207SSteven Rostedt 
18762e1207SSteven Rostedt #include <asm/setup.h>
19762e1207SSteven Rostedt 
20e5a81b62SSteven Rostedt #include "trace.h"
21e5a81b62SSteven Rostedt 
22e5a81b62SSteven Rostedt #define STACK_TRACE_ENTRIES 500
23e5a81b62SSteven Rostedt 
/*
 * With -mfentry the tracing hook is placed before the traced function
 * sets up its own stack frame; stack_trace_call() below uses this flag
 * to decide whether ip or parent_ip is the useful address.
 */
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

/*
 * Snapshot of the deepest stack seen so far.  The arrays are filled by
 * check_stack(): stack_dump_trace[i] is a return address found on the
 * stack and stack_dump_index[i] is the stack depth (in bytes) at which
 * it was found.  ULONG_MAX marks the end of the valid entries.
 */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
331b6cced6SSteven Rostedt 
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	/* [0] is filled with the traced ip by check_stack() */
	.entries		= &stack_dump_trace[1],
};
43e5a81b62SSteven Rostedt 
/* Deepest stack usage seen so far, in bytes. */
static unsigned long max_stack_size;
/* Protects max_stack_size and the stack_dump_* snapshot arrays. */
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-cpu recursion guard for the stack tracer callback. */
static DEFINE_PER_CPU(int, trace_active);
/* Serializes enable/disable through the sysctl handler. */
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
53e5a81b62SSteven Rostedt 
/*
 * Measure how much of the thread stack is currently in use (derived
 * from the address of the caller's local, passed in as @stack) and,
 * on a new maximum, save a stack trace and work out the stack depth
 * at which each trace entry lives.
 *
 * Runs with preemption disabled and trace_active raised (see
 * stack_trace_call), so this CPU cannot re-enter while holding
 * max_stack_lock.
 */
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	/* Bytes between @stack and the top end of the thread stack. */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

	/* The list func adds an extra call frame to skip over. */
	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		/* Default depth in case the entry is not found below. */
		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	/* Catch a stack overflow corrupting the end-of-stack canary. */
	BUG_ON(current != &init_task &&
		*(end_of_stack(current)) != STACK_END_MAGIC);
 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
158e5a81b62SSteven Rostedt 
/*
 * Function tracer callback: sample the current stack depth and let
 * check_stack() record a new snapshot if it is the deepest seen.
 * The address of the local @stack serves as the current stack
 * position; recursion on this CPU is suppressed via trace_active.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
199e5a81b62SSteven Rostedt 
/* ftrace ops hooking stack_trace_call into the function tracer. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
205e5a81b62SSteven Rostedt 
206e5a81b62SSteven Rostedt static ssize_t
207e5a81b62SSteven Rostedt stack_max_size_read(struct file *filp, char __user *ubuf,
208e5a81b62SSteven Rostedt 		    size_t count, loff_t *ppos)
209e5a81b62SSteven Rostedt {
210e5a81b62SSteven Rostedt 	unsigned long *ptr = filp->private_data;
211e5a81b62SSteven Rostedt 	char buf[64];
212e5a81b62SSteven Rostedt 	int r;
213e5a81b62SSteven Rostedt 
214e5a81b62SSteven Rostedt 	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
215e5a81b62SSteven Rostedt 	if (r > sizeof(buf))
216e5a81b62SSteven Rostedt 		r = sizeof(buf);
217e5a81b62SSteven Rostedt 	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
218e5a81b62SSteven Rostedt }
219e5a81b62SSteven Rostedt 
/*
 * Write handler for "stack_max_size": parse a decimal value from
 * userspace and store it under max_stack_lock.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
252e5a81b62SSteven Rostedt 
/* File operations for the "stack_max_size" control file. */
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
259e5a81b62SSteven Rostedt 
260e5a81b62SSteven Rostedt static void *
2612fc5f0cfSLi Zefan __next(struct seq_file *m, loff_t *pos)
262e5a81b62SSteven Rostedt {
2632fc5f0cfSLi Zefan 	long n = *pos - 1;
264e5a81b62SSteven Rostedt 
2652fc5f0cfSLi Zefan 	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
266e5a81b62SSteven Rostedt 		return NULL;
267e5a81b62SSteven Rostedt 
2682fc5f0cfSLi Zefan 	m->private = (void *)n;
2691b6cced6SSteven Rostedt 	return &m->private;
270e5a81b62SSteven Rostedt }
271e5a81b62SSteven Rostedt 
2722fc5f0cfSLi Zefan static void *
2732fc5f0cfSLi Zefan t_next(struct seq_file *m, void *v, loff_t *pos)
2742fc5f0cfSLi Zefan {
2752fc5f0cfSLi Zefan 	(*pos)++;
2762fc5f0cfSLi Zefan 	return __next(m, pos);
2772fc5f0cfSLi Zefan }
2782fc5f0cfSLi Zefan 
/*
 * seq_file start: disable interrupts and take max_stack_lock so the
 * snapshot cannot change while it is printed.  trace_active is raised
 * first so the stack tracer callback cannot take max_stack_lock on
 * this CPU and deadlock (same reasoning as in stack_max_size_write).
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	/* Position 0 prints the header line. */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
295e5a81b62SSteven Rostedt 
/*
 * seq_file stop: undo t_start() in reverse order — release the lock,
 * drop the recursion guard, then re-enable interrupts.
 */
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
307e5a81b62SSteven Rostedt 
3081b6cced6SSteven Rostedt static int trace_lookup_stack(struct seq_file *m, long i)
309e5a81b62SSteven Rostedt {
3101b6cced6SSteven Rostedt 	unsigned long addr = stack_dump_trace[i];
311e5a81b62SSteven Rostedt 
312151772dbSAnton Blanchard 	return seq_printf(m, "%pS\n", (void *)addr);
313e5a81b62SSteven Rostedt }
314e5a81b62SSteven Rostedt 
/* Emit a hint in the stack_trace output when the tracer is off. */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
325e447e1dfSSteven Rostedt 
326e5a81b62SSteven Rostedt static int t_show(struct seq_file *m, void *v)
327e5a81b62SSteven Rostedt {
328522a110bSLiming Wang 	long i;
3291b6cced6SSteven Rostedt 	int size;
330e5a81b62SSteven Rostedt 
331522a110bSLiming Wang 	if (v == SEQ_START_TOKEN) {
3321b6cced6SSteven Rostedt 		seq_printf(m, "        Depth    Size   Location"
3331b6cced6SSteven Rostedt 			   "    (%d entries)\n"
3341b6cced6SSteven Rostedt 			   "        -----    ----   --------\n",
335083a63b4Swalimis 			   max_stack_trace.nr_entries - 1);
336e447e1dfSSteven Rostedt 
337e447e1dfSSteven Rostedt 		if (!stack_tracer_enabled && !max_stack_size)
338e447e1dfSSteven Rostedt 			print_disabled(m);
339e447e1dfSSteven Rostedt 
3401b6cced6SSteven Rostedt 		return 0;
3411b6cced6SSteven Rostedt 	}
3421b6cced6SSteven Rostedt 
343522a110bSLiming Wang 	i = *(long *)v;
344522a110bSLiming Wang 
3451b6cced6SSteven Rostedt 	if (i >= max_stack_trace.nr_entries ||
3461b6cced6SSteven Rostedt 	    stack_dump_trace[i] == ULONG_MAX)
347e5a81b62SSteven Rostedt 		return 0;
348e5a81b62SSteven Rostedt 
3491b6cced6SSteven Rostedt 	if (i+1 == max_stack_trace.nr_entries ||
3501b6cced6SSteven Rostedt 	    stack_dump_trace[i+1] == ULONG_MAX)
3511b6cced6SSteven Rostedt 		size = stack_dump_index[i];
3521b6cced6SSteven Rostedt 	else
3531b6cced6SSteven Rostedt 		size = stack_dump_index[i] - stack_dump_index[i+1];
3541b6cced6SSteven Rostedt 
3551b6cced6SSteven Rostedt 	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
3561b6cced6SSteven Rostedt 
3571b6cced6SSteven Rostedt 	trace_lookup_stack(m, i);
358e5a81b62SSteven Rostedt 
359e5a81b62SSteven Rostedt 	return 0;
360e5a81b62SSteven Rostedt }
361e5a81b62SSteven Rostedt 
/* seq_file iterator for the "stack_trace" file. */
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
368e5a81b62SSteven Rostedt 
/* Open "stack_trace" as a standard seq_file. */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
373e5a81b62SSteven Rostedt 
/* File operations for the read-only "stack_trace" file. */
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
380e5a81b62SSteven Rostedt 
/* Open "stack_trace_filter" via the generic ftrace filter interface. */
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
387d2d45c7aSSteven Rostedt 
/* File operations for the "stack_trace_filter" file. */
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
395d2d45c7aSSteven Rostedt 
396f38f1d2aSSteven Rostedt int
397f38f1d2aSSteven Rostedt stack_trace_sysctl(struct ctl_table *table, int write,
3988d65af78SAlexey Dobriyan 		   void __user *buffer, size_t *lenp,
399f38f1d2aSSteven Rostedt 		   loff_t *ppos)
400f38f1d2aSSteven Rostedt {
401f38f1d2aSSteven Rostedt 	int ret;
402f38f1d2aSSteven Rostedt 
403f38f1d2aSSteven Rostedt 	mutex_lock(&stack_sysctl_mutex);
404f38f1d2aSSteven Rostedt 
4058d65af78SAlexey Dobriyan 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
406f38f1d2aSSteven Rostedt 
407f38f1d2aSSteven Rostedt 	if (ret || !write ||
408a32c7765SLi Zefan 	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
409f38f1d2aSSteven Rostedt 		goto out;
410f38f1d2aSSteven Rostedt 
411a32c7765SLi Zefan 	last_stack_tracer_enabled = !!stack_tracer_enabled;
412f38f1d2aSSteven Rostedt 
413f38f1d2aSSteven Rostedt 	if (stack_tracer_enabled)
414f38f1d2aSSteven Rostedt 		register_ftrace_function(&trace_ops);
415f38f1d2aSSteven Rostedt 	else
416f38f1d2aSSteven Rostedt 		unregister_ftrace_function(&trace_ops);
417f38f1d2aSSteven Rostedt 
418f38f1d2aSSteven Rostedt  out:
419f38f1d2aSSteven Rostedt 	mutex_unlock(&stack_sysctl_mutex);
420f38f1d2aSSteven Rostedt 	return ret;
421f38f1d2aSSteven Rostedt }
422f38f1d2aSSteven Rostedt 
423762e1207SSteven Rostedt static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
424762e1207SSteven Rostedt 
425f38f1d2aSSteven Rostedt static __init int enable_stacktrace(char *str)
426f38f1d2aSSteven Rostedt {
427762e1207SSteven Rostedt 	if (strncmp(str, "_filter=", 8) == 0)
428762e1207SSteven Rostedt 		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);
429762e1207SSteven Rostedt 
430e05a43b7SSteven Rostedt 	stack_tracer_enabled = 1;
431e05a43b7SSteven Rostedt 	last_stack_tracer_enabled = 1;
432f38f1d2aSSteven Rostedt 	return 1;
433f38f1d2aSSteven Rostedt }
434f38f1d2aSSteven Rostedt __setup("stacktrace", enable_stacktrace);
435f38f1d2aSSteven Rostedt 
/*
 * Create the tracefs control files and, if "stacktrace" was given on
 * the kernel command line, apply any saved filter and start tracing.
 */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	/* Filter captured by enable_stacktrace() at early boot. */
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
463