xref: /linux-6.15/kernel/trace/trace_stack.c (revision 38628078)
/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/magic.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
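/*
 * stack_dump_trace[] and stack_dump_index[] are parallel arrays:
 * stack_dump_trace[i] holds the address of the i-th traced function and
 * stack_dump_index[i] holds how many bytes of stack were still in use at
 * the point where that return address was found (measured from the top
 * of the THREAD_SIZE-aligned stack). t_show() below derives the size of
 * each frame from the difference of two consecutive index entries.
 */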

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may, for some
	 * reason, not be found on the stack, so we have to account
	 * for that. If all entries are present, this loop only runs
	 * once. This code only executes on a new max, so it is far
	 * from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	BUG_ON(current != &init_task &&
		*(end_of_stack(current)) != STACK_END_MAGIC);
 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, this variable is only modified on this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
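/*
 * The ftrace_ops above hooks stack_trace_call() into the function
 * tracer. It is registered/unregistered from stack_trace_sysctl() and
 * stack_trace_init() below, and the set of traced functions can be
 * narrowed through the stack_trace_filter file.
 */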

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we trace inside arch_spin_lock() or after it (NMI),
	 * we would deadlock on max_stack_lock, so we also need to
	 * increase the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
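/*
 * Illustrative usage (paths assume the usual debugfs mount point):
 *
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *
 * Writing 0 resets the recorded maximum so that check_stack() will
 * capture a fresh worst case on the next sufficiently deep call chain.
 */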

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

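/*
 * seq_file iteration over the saved trace: position 0 produces the
 * header (SEQ_START_TOKEN in t_start()), and position n maps to entry
 * n - 1 of stack_dump_trace[] via __next().
 */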
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
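/*
 * Example output of the stack_trace file (illustrative addresses and
 * sizes; format follows t_show() above):
 *
 *         Depth    Size   Location    (18 entries)
 *         -----    ----   --------
 *   0)     2088     256   update_curr+0x8e/0x1c0
 *   1)     1832      64   enqueue_entity+0x1f/0x50
 */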

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
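/*
 * stack_trace_filter accepts the usual ftrace filter syntax, e.g.
 * (illustrative):
 *
 *   # echo 'vfs_*' > /sys/kernel/debug/tracing/stack_trace_filter
 *
 * which limits stack_trace_call() to the matching functions only.
 */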

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
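/*
 * Note: stack_trace_sysctl() above is wired up to
 * /proc/sys/kernel/stack_tracer_enabled by a ctl_table entry that lives
 * outside this file (kernel/sysctl.c in kernels of this vintage).
 * Toggling the value registers or unregisters the ftrace callback.
 */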

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
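/*
 * Because __setup() matches on the "stacktrace" prefix, the handler
 * above also sees "stacktrace_filter=<list>" and saves the list for
 * ftrace_set_early_filter() in stack_trace_init(), e.g. (illustrative):
 *
 *   stacktrace_filter=vfs_read,vfs_write
 */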

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);