/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}
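
/*
 * With the format used above, a report on the console looks roughly
 * like this (depths, sizes and symbols are illustrative only, not
 * taken from a real trace):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4360      64   _raw_spin_lock+0x10/0x70
 *   1)     4296     136   vfs_read+0x11d/0x190
 *   2)     4160    4160   SyS_read+0x55/0xb0
 *
 * "Depth" is how far stack_dump_trace[i] sits from the top of the
 * stack, and "Size" is the difference between adjacent depths, i.e.
 * the stack footprint of that one function.
 */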

/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        /*
         * There's a slight chance that we are tracing inside the
         * RCU infrastructure, and rcu_irq_enter() will not work
         * as expected.
         */
        if (unlikely(rcu_irq_enter_disabled()))
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);

        /*
         * RCU may not be watching, make it see us.
         * The stack trace code uses rcu_sched.
         */
        rcu_irq_enter();

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
        stack_trace_max.skip = 3;

        save_stack_trace(&stack_trace_max);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Some archs may not have the passed in ip in the dump.
         * If that happens, we need to show everything.
         */
        if (i == stack_trace_max.nr_entries)
                i = 0;
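
        /*
         * The loop below walks each word on the kernel stack and matches
         * it against the entries save_stack_trace() returned.  A purely
         * illustrative sketch (entry 0 is the innermost function, so its
         * return address sits farthest from the top of the stack):
         *
         *  top ->  +-----------------+
         *          |       ...       |
         *          | ret to outer fn |  <- stack_dump_trace[2]
         *          |       ...       |
         *          | ret to mid fn   |  <- stack_dump_trace[1]
         *          |       ...       |
         *          | ret to inner fn |  <- stack_dump_trace[0]
         *          +-----------------+
         *
         * stack_trace_index[x] records the byte distance from the slot
         * found up to the top; the difference between two neighboring
         * index values is the stack usage of the function in between.
         */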

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;

                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        /*
                         * The READ_ONCE_NOCHECK is used to let KASAN know that
                         * this is not a stack-out-of-bounds error.
                         */
                        if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}
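
/*
 * stack_trace_call() below is the callback the function tracer invokes
 * at (nearly) every kernel function entry once trace_ops is registered.
 * Everything it calls is itself traceable, so the per-cpu
 * disable_stack_tracer counter serves as a recursion guard: only the
 * outermost invocation on a CPU (counter == 1) actually checks the stack.
 */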

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;

        preempt_disable_notrace();

        /* no atomic needed, we only modify this variable by this cpu */
        __this_cpu_inc(disable_stack_tracer);
        if (__this_cpu_read(disable_stack_tracer) != 1)
                goto out;

        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        __this_cpu_dec(disable_stack_tracer);
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
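
/*
 * Note that check_stack() is handed the address of the on-stack
 * variable "stack" above, which marks how deep the stack already is
 * at the time of the call.  trace_ops only takes effect once it is
 * registered with register_ftrace_function(), which happens at boot
 * or through the sysctl handler below when the tracer is enabled.
 */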

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu disable_stack_tracer here.
         */
        __this_cpu_inc(disable_stack_tracer);

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        __this_cpu_dec(disable_stack_tracer);
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};
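
/*
 * stack_max_size is exposed as a file in the tracing directory by
 * stack_trace_init() below.  A typical shell session, assuming the
 * usual debugfs mount point (paths may differ on other setups):
 *
 *   # cat /sys/kernel/debug/tracing/stack_max_size      # read the max
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size # reset it
 */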

static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        local_irq_disable();

        __this_cpu_inc(disable_stack_tracer);

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        arch_spin_unlock(&stack_trace_max_lock);

        __this_cpu_dec(disable_stack_tracer);

        local_irq_enable();
}
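
/*
 * t_start() above pins the snapshot for the whole seq_file iteration:
 * interrupts are off, the per-cpu counter keeps stack_trace_call()
 * from recursing on this CPU, and stack_trace_max_lock keeps
 * check_stack() on other CPUs from updating the trace while it is
 * being printed.  t_stop() releases all three in reverse order.
 */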

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_ops *ops = inode->i_private;

        return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
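
/*
 * __setup() matches boot parameters by prefix, so both "stacktrace"
 * and "stacktrace_filter=<function-list>" on the kernel command line
 * end up in enable_stacktrace(); in the latter case str begins with
 * "_filter=" and the list is stashed until stack_trace_init() can
 * hand it to ftrace_set_early_filter().
 */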
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("stack_trace_filter", 0444, d_tracer,
                          &trace_ops, &stack_trace_filter_fops);
#endif

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);