/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

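/*
 * Dump the current max-stack record to the console.  stack_dump_trace[]
 * and stack_trace_index[] are parallel arrays: entry i is the i-th
 * function found on the deepest stack seen so far, paired with its
 * depth in bytes measured from the top of the thread stack.
 * Illustrative output (symbols and numbers are made up, not from a
 * real trace):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4360      64   _raw_spin_lock+0x5/0x30
 *   1)     4296     216   do_fork+0x9c/0x3c0
 *   2)     4080    4080   entry_SYSCALL_64_fastpath+0x12/0x6a
 */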
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

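	/*
	 * "stack" points at a local variable in our caller, so it marks
	 * the current stack depth.  The thread stack is THREAD_SIZE
	 * aligned, so masking with THREAD_SIZE-1 gives the offset from
	 * the bottom of the stack; subtracting that from THREAD_SIZE
	 * yields the number of bytes currently in use.
	 */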
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

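	/*
	 * skip tells save_stack_trace() how many leading entries to
	 * drop; here that should cover the stack tracer's own call
	 * chain (this function and the ftrace callback), so that the
	 * recorded trace starts near the traced function.
	 */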
	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

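/*
 * The ftrace callback, invoked at the entry of every traced function.
 * The per-cpu trace_active counter makes the tracer ignore functions
 * called while check_stack() itself is running on this CPU, which
 * would otherwise recurse.  The ip handed to us is the address of the
 * mcount/fentry call site; MCOUNT_INSN_SIZE is added so it lines up
 * with the return address that save_stack_trace() finds on the stack
 * (an assumption worth double-checking per architecture).
 */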
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

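/*
 * Writing to the stack_max_size file replaces the recorded maximum;
 * writing 0 rearms the tracer so the next deep stack is captured from
 * scratch.  Assuming debugfs is mounted in the usual place:
 *
 *	echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */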
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

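/*
 * seq_file iterator for the stack_trace file.  t_start() disables
 * interrupts, bumps trace_active and takes stack_trace_max_lock, so
 * the recorded trace cannot change (and the tracer cannot recurse on
 * this CPU) while it is being printed; t_stop() undoes all of it.
 */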
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

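/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled.  Toggling the
 * value registers or unregisters the ftrace callback;
 * stack_sysctl_mutex serializes concurrent writers.
 */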
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * __setup() matches boot parameters by prefix, so this handler sees
 * both "stacktrace" and "stacktrace_filter=<funcs>"; in the latter
 * case str holds the "_filter=<funcs>" remainder.
 */
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);