xref: /linux-6.15/kernel/trace/trace_stack.c (revision bb99d8cc)
1e5a81b62SSteven Rostedt /*
2e5a81b62SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
3e5a81b62SSteven Rostedt  *
4e5a81b62SSteven Rostedt  */
5e5a81b62SSteven Rostedt #include <linux/stacktrace.h>
6e5a81b62SSteven Rostedt #include <linux/kallsyms.h>
7e5a81b62SSteven Rostedt #include <linux/seq_file.h>
8e5a81b62SSteven Rostedt #include <linux/spinlock.h>
9e5a81b62SSteven Rostedt #include <linux/uaccess.h>
10e5a81b62SSteven Rostedt #include <linux/ftrace.h>
11e5a81b62SSteven Rostedt #include <linux/module.h>
12f38f1d2aSSteven Rostedt #include <linux/sysctl.h>
13e5a81b62SSteven Rostedt #include <linux/init.h>
14762e1207SSteven Rostedt 
15762e1207SSteven Rostedt #include <asm/setup.h>
16762e1207SSteven Rostedt 
17e5a81b62SSteven Rostedt #include "trace.h"
18e5a81b62SSteven Rostedt 
/*
 * Backtrace of the deepest stack usage seen so far. One extra slot is
 * reserved so the array can always be terminated with a ULONG_MAX
 * sentinel.
 */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
/* Stack depth in bytes remaining below each entry of stack_dump_trace */
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

/* Deepest stack usage (in bytes) recorded so far */
unsigned long stack_trace_max_size;
/* Protects all of the max-stack data above; taken with IRQs disabled */
arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-CPU recursion guard: non-zero while the tracer runs on this CPU */
static DEFINE_PER_CPU(int, trace_active);
/* Serializes enabling/disabling via the stack_tracer_enabled sysctl */
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
/* Last value seen by the sysctl handler, to detect actual transitions */
static int last_stack_tracer_enabled;
42e5a81b62SSteven Rostedt 
43*bb99d8ccSAKASHI Takahiro void stack_trace_print(void)
44e3172181SMinchan Kim {
45e3172181SMinchan Kim 	long i;
46e3172181SMinchan Kim 	int size;
47e3172181SMinchan Kim 
48e3172181SMinchan Kim 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
49e3172181SMinchan Kim 			   "        -----    ----   --------\n",
50*bb99d8ccSAKASHI Takahiro 			   stack_trace_max.nr_entries);
51e3172181SMinchan Kim 
52*bb99d8ccSAKASHI Takahiro 	for (i = 0; i < stack_trace_max.nr_entries; i++) {
53e3172181SMinchan Kim 		if (stack_dump_trace[i] == ULONG_MAX)
54e3172181SMinchan Kim 			break;
55*bb99d8ccSAKASHI Takahiro 		if (i+1 == stack_trace_max.nr_entries ||
56e3172181SMinchan Kim 				stack_dump_trace[i+1] == ULONG_MAX)
57*bb99d8ccSAKASHI Takahiro 			size = stack_trace_index[i];
58e3172181SMinchan Kim 		else
59*bb99d8ccSAKASHI Takahiro 			size = stack_trace_index[i] - stack_trace_index[i+1];
60e3172181SMinchan Kim 
61*bb99d8ccSAKASHI Takahiro 		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
62e3172181SMinchan Kim 				size, (void *)stack_dump_trace[i]);
63e3172181SMinchan Kim 	}
64e3172181SMinchan Kim }
65e3172181SMinchan Kim 
/*
 * When arch-specific code overides this function, the following
 * data should be filled up, assuming max_stack_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	/* Bytes of stack in use: from 'stack' down to the end of the thread stack */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	/* Fast path: not a new maximum, nothing to record */
	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		/* Depth recorded for entry x; refined when the word is located */
		stack_trace_index[x] = this_size;
		p = start;

		/* Scan stack words for the return address of entry i */
		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				/* Compact: slot x receives return address i */
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		/* Entry i was never found on the stack; skip over it */
		if (!found)
			i++;
	}

	/* Re-terminate the compacted trace with the ULONG_MAX sentinel */
	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
181e5a81b62SSteven Rostedt 
/*
 * ftrace callback, invoked on every traced function entry. Uses a
 * per-cpu counter to guard against recursing into itself, then checks
 * whether the current stack usage is a new maximum.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	/* Local marks the current stack position for check_stack() */
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/* Point ip past the mcount call site of the traced function */
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
205e5a81b62SSteven Rostedt 
/* ftrace hook that drives the stack tracer */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
211e5a81b62SSteven Rostedt 
212e5a81b62SSteven Rostedt static ssize_t
213e5a81b62SSteven Rostedt stack_max_size_read(struct file *filp, char __user *ubuf,
214e5a81b62SSteven Rostedt 		    size_t count, loff_t *ppos)
215e5a81b62SSteven Rostedt {
216e5a81b62SSteven Rostedt 	unsigned long *ptr = filp->private_data;
217e5a81b62SSteven Rostedt 	char buf[64];
218e5a81b62SSteven Rostedt 	int r;
219e5a81b62SSteven Rostedt 
220e5a81b62SSteven Rostedt 	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
221e5a81b62SSteven Rostedt 	if (r > sizeof(buf))
222e5a81b62SSteven Rostedt 		r = sizeof(buf);
223e5a81b62SSteven Rostedt 	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
224e5a81b62SSteven Rostedt }
225e5a81b62SSteven Rostedt 
/*
 * Store a user-supplied value into the max-size counter (file
 * private_data), under max_stack_lock with IRQs off.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
258e5a81b62SSteven Rostedt 
/* File operations for the "stack_max_size" tracefs file */
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
265e5a81b62SSteven Rostedt 
266e5a81b62SSteven Rostedt static void *
2672fc5f0cfSLi Zefan __next(struct seq_file *m, loff_t *pos)
268e5a81b62SSteven Rostedt {
2692fc5f0cfSLi Zefan 	long n = *pos - 1;
270e5a81b62SSteven Rostedt 
271*bb99d8ccSAKASHI Takahiro 	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
272e5a81b62SSteven Rostedt 		return NULL;
273e5a81b62SSteven Rostedt 
2742fc5f0cfSLi Zefan 	m->private = (void *)n;
2751b6cced6SSteven Rostedt 	return &m->private;
276e5a81b62SSteven Rostedt }
277e5a81b62SSteven Rostedt 
2782fc5f0cfSLi Zefan static void *
2792fc5f0cfSLi Zefan t_next(struct seq_file *m, void *v, loff_t *pos)
2802fc5f0cfSLi Zefan {
2812fc5f0cfSLi Zefan 	(*pos)++;
2822fc5f0cfSLi Zefan 	return __next(m, pos);
2832fc5f0cfSLi Zefan }
2842fc5f0cfSLi Zefan 
/*
 * seq_file .start: take max_stack_lock (with IRQs off and the tracer
 * recursion guard raised) for the whole iteration; released in t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	/* Keep the stack tracer from recursing on us while we hold the lock */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	/* Position 0 is the header line */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
301e5a81b62SSteven Rostedt 
/* seq_file .stop: undo t_start() — drop the lock, guard, and IRQ-off. */
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
313e5a81b62SSteven Rostedt 
314962e3707SJoe Perches static void trace_lookup_stack(struct seq_file *m, long i)
315e5a81b62SSteven Rostedt {
3161b6cced6SSteven Rostedt 	unsigned long addr = stack_dump_trace[i];
317e5a81b62SSteven Rostedt 
318962e3707SJoe Perches 	seq_printf(m, "%pS\n", (void *)addr);
319e5a81b62SSteven Rostedt }
320e5a81b62SSteven Rostedt 
/* Emit a hint into the seq_file output when the tracer is disabled. */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
331e447e1dfSSteven Rostedt 
332e5a81b62SSteven Rostedt static int t_show(struct seq_file *m, void *v)
333e5a81b62SSteven Rostedt {
334522a110bSLiming Wang 	long i;
3351b6cced6SSteven Rostedt 	int size;
336e5a81b62SSteven Rostedt 
337522a110bSLiming Wang 	if (v == SEQ_START_TOKEN) {
3381b6cced6SSteven Rostedt 		seq_printf(m, "        Depth    Size   Location"
3391b6cced6SSteven Rostedt 			   "    (%d entries)\n"
3401b6cced6SSteven Rostedt 			   "        -----    ----   --------\n",
341*bb99d8ccSAKASHI Takahiro 			   stack_trace_max.nr_entries);
342e447e1dfSSteven Rostedt 
343*bb99d8ccSAKASHI Takahiro 		if (!stack_tracer_enabled && !stack_trace_max_size)
344e447e1dfSSteven Rostedt 			print_disabled(m);
345e447e1dfSSteven Rostedt 
3461b6cced6SSteven Rostedt 		return 0;
3471b6cced6SSteven Rostedt 	}
3481b6cced6SSteven Rostedt 
349522a110bSLiming Wang 	i = *(long *)v;
350522a110bSLiming Wang 
351*bb99d8ccSAKASHI Takahiro 	if (i >= stack_trace_max.nr_entries ||
3521b6cced6SSteven Rostedt 	    stack_dump_trace[i] == ULONG_MAX)
353e5a81b62SSteven Rostedt 		return 0;
354e5a81b62SSteven Rostedt 
355*bb99d8ccSAKASHI Takahiro 	if (i+1 == stack_trace_max.nr_entries ||
3561b6cced6SSteven Rostedt 	    stack_dump_trace[i+1] == ULONG_MAX)
357*bb99d8ccSAKASHI Takahiro 		size = stack_trace_index[i];
3581b6cced6SSteven Rostedt 	else
359*bb99d8ccSAKASHI Takahiro 		size = stack_trace_index[i] - stack_trace_index[i+1];
3601b6cced6SSteven Rostedt 
361*bb99d8ccSAKASHI Takahiro 	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);
3621b6cced6SSteven Rostedt 
3631b6cced6SSteven Rostedt 	trace_lookup_stack(m, i);
364e5a81b62SSteven Rostedt 
365e5a81b62SSteven Rostedt 	return 0;
366e5a81b62SSteven Rostedt }
367e5a81b62SSteven Rostedt 
/* seq_file iterator for the "stack_trace" tracefs file */
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
374e5a81b62SSteven Rostedt 
/* Open handler wiring the "stack_trace" file to its seq_operations. */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
379e5a81b62SSteven Rostedt 
/* File operations for the read-only "stack_trace" tracefs file */
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
386e5a81b62SSteven Rostedt 
/* Open handler exposing the ftrace filter interface for trace_ops. */
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
393d2d45c7aSSteven Rostedt 
/* File operations for the "stack_trace_filter" tracefs file */
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
401d2d45c7aSSteven Rostedt 
/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled.
 * (Un)registers the ftrace callback only on an actual 0<->1 transition,
 * serialized by stack_sysctl_mutex.
 */
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, error, or when the value did not change */
	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
428f38f1d2aSSteven Rostedt 
/* Copy of the "stacktrace_filter=" boot parameter, consumed at init time */
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * Handle the "stacktrace" family of kernel command line options:
 * stash any "_filter=" argument and flag the tracer for early enable.
 * NUL termination is guaranteed: the buffer is static (zero-filled)
 * and one byte larger than the maximum copy length.
 */
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
441f38f1d2aSSteven Rostedt 
/*
 * Boot-time setup: create the tracefs control files and, if requested
 * on the command line, apply the early filter and start the tracer.
 */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	/* Filter string captured by enable_stacktrace() at parse time */
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
469