1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2e5a81b62SSteven Rostedt /* 3e5a81b62SSteven Rostedt * Copyright (C) 2008 Steven Rostedt <[email protected]> 4e5a81b62SSteven Rostedt * 5e5a81b62SSteven Rostedt */ 668db0cf1SIngo Molnar #include <linux/sched/task_stack.h> 7e5a81b62SSteven Rostedt #include <linux/stacktrace.h> 8e5a81b62SSteven Rostedt #include <linux/kallsyms.h> 9e5a81b62SSteven Rostedt #include <linux/seq_file.h> 10e5a81b62SSteven Rostedt #include <linux/spinlock.h> 11e5a81b62SSteven Rostedt #include <linux/uaccess.h> 12e5a81b62SSteven Rostedt #include <linux/ftrace.h> 13e5a81b62SSteven Rostedt #include <linux/module.h> 14f38f1d2aSSteven Rostedt #include <linux/sysctl.h> 15e5a81b62SSteven Rostedt #include <linux/init.h> 16762e1207SSteven Rostedt 17762e1207SSteven Rostedt #include <asm/setup.h> 18762e1207SSteven Rostedt 19e5a81b62SSteven Rostedt #include "trace.h" 20e5a81b62SSteven Rostedt 211b6cced6SSteven Rostedt static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = 221b6cced6SSteven Rostedt { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; 23bb99d8ccSAKASHI Takahiro unsigned stack_trace_index[STACK_TRACE_ENTRIES]; 241b6cced6SSteven Rostedt 254df29712SSteven Rostedt (Red Hat) /* 264df29712SSteven Rostedt (Red Hat) * Reserve one entry for the passed in ip. This will allow 274df29712SSteven Rostedt (Red Hat) * us to remove most or all of the stack size overhead 284df29712SSteven Rostedt (Red Hat) * added by the stack tracer itself. 294df29712SSteven Rostedt (Red Hat) */ 30bb99d8ccSAKASHI Takahiro struct stack_trace stack_trace_max = { 314df29712SSteven Rostedt (Red Hat) .max_entries = STACK_TRACE_ENTRIES - 1, 3272ac426aSSteven Rostedt (Red Hat) .entries = &stack_dump_trace[0], 33e5a81b62SSteven Rostedt }; 34e5a81b62SSteven Rostedt 35bb99d8ccSAKASHI Takahiro unsigned long stack_trace_max_size; 36d332736dSSteven Rostedt (Red Hat) arch_spinlock_t stack_trace_max_lock = 37edc35bd7SThomas Gleixner (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 38e5a81b62SSteven Rostedt 398aaf1ee7SSteven Rostedt (VMware) DEFINE_PER_CPU(int, disable_stack_tracer); 40f38f1d2aSSteven Rostedt static DEFINE_MUTEX(stack_sysctl_mutex); 41f38f1d2aSSteven Rostedt 42f38f1d2aSSteven Rostedt int stack_tracer_enabled; 43f38f1d2aSSteven Rostedt static int last_stack_tracer_enabled; 44e5a81b62SSteven Rostedt 45bb99d8ccSAKASHI Takahiro void stack_trace_print(void) 46e3172181SMinchan Kim { 47e3172181SMinchan Kim long i; 48e3172181SMinchan Kim int size; 49e3172181SMinchan Kim 50e3172181SMinchan Kim pr_emerg(" Depth Size Location (%d entries)\n" 51e3172181SMinchan Kim " ----- ---- --------\n", 52bb99d8ccSAKASHI Takahiro stack_trace_max.nr_entries); 53e3172181SMinchan Kim 54bb99d8ccSAKASHI Takahiro for (i = 0; i < stack_trace_max.nr_entries; i++) { 55e3172181SMinchan Kim if (stack_dump_trace[i] == ULONG_MAX) 56e3172181SMinchan Kim break; 57bb99d8ccSAKASHI Takahiro if (i+1 == stack_trace_max.nr_entries || 58e3172181SMinchan Kim stack_dump_trace[i+1] == ULONG_MAX) 59bb99d8ccSAKASHI Takahiro size = stack_trace_index[i]; 60e3172181SMinchan Kim else 61bb99d8ccSAKASHI Takahiro size = stack_trace_index[i] - stack_trace_index[i+1]; 62e3172181SMinchan Kim 63bb99d8ccSAKASHI Takahiro pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i], 64e3172181SMinchan Kim size, (void *)stack_dump_trace[i]); 65e3172181SMinchan Kim } 66e3172181SMinchan Kim } 67e3172181SMinchan Kim 68bb99d8ccSAKASHI Takahiro /* 69505d3085SMasahiro Yamada * When arch-specific code overrides this function, the following 70d332736dSSteven Rostedt (Red Hat) * data should be filled up, assuming stack_trace_max_lock is held to 71bb99d8ccSAKASHI Takahiro * prevent concurrent updates. 72bb99d8ccSAKASHI Takahiro * stack_trace_index[] 73bb99d8ccSAKASHI Takahiro * stack_trace_max 74bb99d8ccSAKASHI Takahiro * stack_trace_max_size 75bb99d8ccSAKASHI Takahiro */ 76bb99d8ccSAKASHI Takahiro void __weak 77d4ecbfc4SSteven Rostedt (Red Hat) check_stack(unsigned long ip, unsigned long *stack) 78e5a81b62SSteven Rostedt { 79e3172181SMinchan Kim unsigned long this_size, flags; unsigned long *p, *top, *start; 804df29712SSteven Rostedt (Red Hat) static int tracer_frame; 816aa7de05SMark Rutland int frame_size = READ_ONCE(tracer_frame); 8272ac426aSSteven Rostedt (Red Hat) int i, x; 83e5a81b62SSteven Rostedt 8487889501SSteven Rostedt (Red Hat) this_size = ((unsigned long)stack) & (THREAD_SIZE-1); 85e5a81b62SSteven Rostedt this_size = THREAD_SIZE - this_size; 864df29712SSteven Rostedt (Red Hat) /* Remove the frame of the tracer */ 874df29712SSteven Rostedt (Red Hat) this_size -= frame_size; 88e5a81b62SSteven Rostedt 89bb99d8ccSAKASHI Takahiro if (this_size <= stack_trace_max_size) 90e5a81b62SSteven Rostedt return; 91e5a81b62SSteven Rostedt 9281520a1bSSteven Rostedt /* we do not handle interrupt stacks yet */ 9387889501SSteven Rostedt (Red Hat) if (!object_is_on_stack(stack)) 9481520a1bSSteven Rostedt return; 9581520a1bSSteven Rostedt 961904be1bSSteven Rostedt (Red Hat) /* Can't do this from NMI context (can cause deadlocks) */ 971904be1bSSteven Rostedt (Red Hat) if (in_nmi()) 981904be1bSSteven Rostedt (Red Hat) return; 991904be1bSSteven Rostedt (Red Hat) 100a5e25883SSteven Rostedt local_irq_save(flags); 101d332736dSSteven Rostedt (Red Hat) arch_spin_lock(&stack_trace_max_lock); 102e5a81b62SSteven Rostedt 1034df29712SSteven Rostedt (Red Hat) /* In case another CPU set the tracer_frame on us */ 1044df29712SSteven Rostedt (Red Hat) if (unlikely(!frame_size)) 1054df29712SSteven Rostedt (Red Hat) this_size -= tracer_frame; 1064df29712SSteven Rostedt (Red Hat) 107e5a81b62SSteven Rostedt /* a race could have already updated it */ 108bb99d8ccSAKASHI Takahiro if (this_size <= stack_trace_max_size) 109e5a81b62SSteven Rostedt goto out; 110e5a81b62SSteven Rostedt 111bb99d8ccSAKASHI Takahiro stack_trace_max_size = this_size; 112e5a81b62SSteven Rostedt 113bb99d8ccSAKASHI Takahiro stack_trace_max.nr_entries = 0; 114bb99d8ccSAKASHI Takahiro stack_trace_max.skip = 3; 115e5a81b62SSteven Rostedt 116bb99d8ccSAKASHI Takahiro save_stack_trace(&stack_trace_max); 117e5a81b62SSteven Rostedt 11872ac426aSSteven Rostedt (Red Hat) /* Skip over the overhead of the stack tracer itself */ 119bb99d8ccSAKASHI Takahiro for (i = 0; i < stack_trace_max.nr_entries; i++) { 12072ac426aSSteven Rostedt (Red Hat) if (stack_dump_trace[i] == ip) 12172ac426aSSteven Rostedt (Red Hat) break; 12272ac426aSSteven Rostedt (Red Hat) } 123d4ecbfc4SSteven Rostedt (Red Hat) 124d4ecbfc4SSteven Rostedt (Red Hat) /* 1256ccd8371SSteven Rostedt * Some archs may not have the passed in ip in the dump. 1266ccd8371SSteven Rostedt * If that happens, we need to show everything. 1276ccd8371SSteven Rostedt */ 1286ccd8371SSteven Rostedt if (i == stack_trace_max.nr_entries) 1296ccd8371SSteven Rostedt i = 0; 1306ccd8371SSteven Rostedt 1316ccd8371SSteven Rostedt /* 1321b6cced6SSteven Rostedt * Now find where in the stack these are. 1331b6cced6SSteven Rostedt */ 13472ac426aSSteven Rostedt (Red Hat) x = 0; 13587889501SSteven Rostedt (Red Hat) start = stack; 1361b6cced6SSteven Rostedt top = (unsigned long *) 1371b6cced6SSteven Rostedt (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); 1381b6cced6SSteven Rostedt 1391b6cced6SSteven Rostedt /* 1401b6cced6SSteven Rostedt * Loop through all the entries. One of the entries may 1411b6cced6SSteven Rostedt * for some reason be missed on the stack, so we may 1421b6cced6SSteven Rostedt * have to account for them. If they are all there, this 1431b6cced6SSteven Rostedt * loop will only happen once. This code only takes place 1441b6cced6SSteven Rostedt * on a new max, so it is far from a fast path. 1451b6cced6SSteven Rostedt */ 146bb99d8ccSAKASHI Takahiro while (i < stack_trace_max.nr_entries) { 1470a37119dSSteven Rostedt int found = 0; 1481b6cced6SSteven Rostedt 149bb99d8ccSAKASHI Takahiro stack_trace_index[x] = this_size; 1501b6cced6SSteven Rostedt p = start; 1511b6cced6SSteven Rostedt 152bb99d8ccSAKASHI Takahiro for (; p < top && i < stack_trace_max.nr_entries; p++) { 15372ac426aSSteven Rostedt (Red Hat) if (stack_dump_trace[i] == ULONG_MAX) 15472ac426aSSteven Rostedt (Red Hat) break; 1556e22c836SYang Shi /* 1566e22c836SYang Shi * The READ_ONCE_NOCHECK is used to let KASAN know that 1576e22c836SYang Shi * this is not a stack-out-of-bounds error. 1586e22c836SYang Shi */ 1596e22c836SYang Shi if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) { 16072ac426aSSteven Rostedt (Red Hat) stack_dump_trace[x] = stack_dump_trace[i++]; 161bb99d8ccSAKASHI Takahiro this_size = stack_trace_index[x++] = 1621b6cced6SSteven Rostedt (top - p) * sizeof(unsigned long); 1630a37119dSSteven Rostedt found = 1; 1641b6cced6SSteven Rostedt /* Start the search from here */ 1651b6cced6SSteven Rostedt start = p + 1; 1664df29712SSteven Rostedt (Red Hat) /* 1674df29712SSteven Rostedt (Red Hat) * We do not want to show the overhead 1684df29712SSteven Rostedt (Red Hat) * of the stack tracer stack in the 1694df29712SSteven Rostedt (Red Hat) * max stack. If we haven't figured 1704df29712SSteven Rostedt (Red Hat) * out what that is, then figure it out 1714df29712SSteven Rostedt (Red Hat) * now. 1724df29712SSteven Rostedt (Red Hat) */ 17372ac426aSSteven Rostedt (Red Hat) if (unlikely(!tracer_frame)) { 1744df29712SSteven Rostedt (Red Hat) tracer_frame = (p - stack) * 1754df29712SSteven Rostedt (Red Hat) sizeof(unsigned long); 176bb99d8ccSAKASHI Takahiro stack_trace_max_size -= tracer_frame; 1774df29712SSteven Rostedt (Red Hat) } 1781b6cced6SSteven Rostedt } 1791b6cced6SSteven Rostedt } 1801b6cced6SSteven Rostedt 1810a37119dSSteven Rostedt if (!found) 1821b6cced6SSteven Rostedt i++; 1831b6cced6SSteven Rostedt } 1841b6cced6SSteven Rostedt 185bb99d8ccSAKASHI Takahiro stack_trace_max.nr_entries = x; 18672ac426aSSteven Rostedt (Red Hat) for (; x < i; x++) 18772ac426aSSteven Rostedt (Red Hat) stack_dump_trace[x] = ULONG_MAX; 18872ac426aSSteven Rostedt (Red Hat) 189a70857e4SAaron Tomlin if (task_stack_end_corrupted(current)) { 190bb99d8ccSAKASHI Takahiro stack_trace_print(); 191e3172181SMinchan Kim BUG(); 192e3172181SMinchan Kim } 193e3172181SMinchan Kim 194e5a81b62SSteven Rostedt out: 195d332736dSSteven Rostedt (Red Hat) arch_spin_unlock(&stack_trace_max_lock); 196a5e25883SSteven Rostedt local_irq_restore(flags); 197e5a81b62SSteven Rostedt } 198e5a81b62SSteven Rostedt 199e5a81b62SSteven Rostedt static void 200a1e2e31dSSteven Rostedt stack_trace_call(unsigned long ip, unsigned long parent_ip, 201a1e2e31dSSteven Rostedt struct ftrace_ops *op, struct pt_regs *pt_regs) 202e5a81b62SSteven Rostedt { 20387889501SSteven Rostedt (Red Hat) unsigned long stack; 204e5a81b62SSteven Rostedt 2055168ae50SSteven Rostedt preempt_disable_notrace(); 206e5a81b62SSteven Rostedt 207e5a81b62SSteven Rostedt /* no atomic needed, we only modify this variable by this cpu */ 2088aaf1ee7SSteven Rostedt (VMware) __this_cpu_inc(disable_stack_tracer); 2098aaf1ee7SSteven Rostedt (VMware) if (__this_cpu_read(disable_stack_tracer) != 1) 210e5a81b62SSteven Rostedt goto out; 211e5a81b62SSteven Rostedt 212*b00d607bSSteven Rostedt (VMware) /* If rcu is not watching, then save stack trace can fail */ 213*b00d607bSSteven Rostedt (VMware) if (!rcu_is_watching()) 214*b00d607bSSteven Rostedt (VMware) goto out; 215*b00d607bSSteven Rostedt (VMware) 2164df29712SSteven Rostedt (Red Hat) ip += MCOUNT_INSN_SIZE; 2174df29712SSteven Rostedt (Red Hat) 2184df29712SSteven Rostedt (Red Hat) check_stack(ip, &stack); 219e5a81b62SSteven Rostedt 220e5a81b62SSteven Rostedt out: 2218aaf1ee7SSteven Rostedt (VMware) __this_cpu_dec(disable_stack_tracer); 222e5a81b62SSteven Rostedt /* prevent recursion in schedule */ 2235168ae50SSteven Rostedt preempt_enable_notrace(); 224e5a81b62SSteven Rostedt } 225e5a81b62SSteven Rostedt 226e5a81b62SSteven Rostedt static struct ftrace_ops trace_ops __read_mostly = 227e5a81b62SSteven Rostedt { 228e5a81b62SSteven Rostedt .func = stack_trace_call, 2294740974aSSteven Rostedt .flags = FTRACE_OPS_FL_RECURSION_SAFE, 230e5a81b62SSteven Rostedt }; 231e5a81b62SSteven Rostedt 232e5a81b62SSteven Rostedt static ssize_t 233e5a81b62SSteven Rostedt stack_max_size_read(struct file *filp, char __user *ubuf, 234e5a81b62SSteven Rostedt size_t count, loff_t *ppos) 235e5a81b62SSteven Rostedt { 236e5a81b62SSteven Rostedt unsigned long *ptr = filp->private_data; 237e5a81b62SSteven Rostedt char buf[64]; 238e5a81b62SSteven Rostedt int r; 239e5a81b62SSteven Rostedt 240e5a81b62SSteven Rostedt r = snprintf(buf, sizeof(buf), "%ld\n", *ptr); 241e5a81b62SSteven Rostedt if (r > sizeof(buf)) 242e5a81b62SSteven Rostedt r = sizeof(buf); 243e5a81b62SSteven Rostedt return simple_read_from_buffer(ubuf, count, ppos, buf, r); 244e5a81b62SSteven Rostedt } 245e5a81b62SSteven Rostedt 246e5a81b62SSteven Rostedt static ssize_t 247e5a81b62SSteven Rostedt stack_max_size_write(struct file *filp, const char __user *ubuf, 248e5a81b62SSteven Rostedt size_t count, loff_t *ppos) 249e5a81b62SSteven Rostedt { 250e5a81b62SSteven Rostedt long *ptr = filp->private_data; 251e5a81b62SSteven Rostedt unsigned long val, flags; 252e5a81b62SSteven Rostedt int ret; 253e5a81b62SSteven Rostedt 25422fe9b54SPeter Huewe ret = kstrtoul_from_user(ubuf, count, 10, &val); 25522fe9b54SPeter Huewe if (ret) 256e5a81b62SSteven Rostedt return ret; 257e5a81b62SSteven Rostedt 258a5e25883SSteven Rostedt local_irq_save(flags); 2594f48f8b7SLai Jiangshan 2604f48f8b7SLai Jiangshan /* 2614f48f8b7SLai Jiangshan * In case we trace inside arch_spin_lock() or after (NMI), 2624f48f8b7SLai Jiangshan * we will cause circular lock, so we also need to increase 2638aaf1ee7SSteven Rostedt (VMware) * the percpu disable_stack_tracer here. 2644f48f8b7SLai Jiangshan */ 2658aaf1ee7SSteven Rostedt (VMware) __this_cpu_inc(disable_stack_tracer); 2664f48f8b7SLai Jiangshan 267d332736dSSteven Rostedt (Red Hat) arch_spin_lock(&stack_trace_max_lock); 268e5a81b62SSteven Rostedt *ptr = val; 269d332736dSSteven Rostedt (Red Hat) arch_spin_unlock(&stack_trace_max_lock); 2704f48f8b7SLai Jiangshan 2718aaf1ee7SSteven Rostedt (VMware) __this_cpu_dec(disable_stack_tracer); 272a5e25883SSteven Rostedt local_irq_restore(flags); 273e5a81b62SSteven Rostedt 274e5a81b62SSteven Rostedt return count; 275e5a81b62SSteven Rostedt } 276e5a81b62SSteven Rostedt 277f38f1d2aSSteven Rostedt static const struct file_operations stack_max_size_fops = { 278e5a81b62SSteven Rostedt .open = tracing_open_generic, 279e5a81b62SSteven Rostedt .read = stack_max_size_read, 280e5a81b62SSteven Rostedt .write = stack_max_size_write, 2816038f373SArnd Bergmann .llseek = default_llseek, 282e5a81b62SSteven Rostedt }; 283e5a81b62SSteven Rostedt 284e5a81b62SSteven Rostedt static void * 2852fc5f0cfSLi Zefan __next(struct seq_file *m, loff_t *pos) 286e5a81b62SSteven Rostedt { 2872fc5f0cfSLi Zefan long n = *pos - 1; 288e5a81b62SSteven Rostedt 289bb99d8ccSAKASHI Takahiro if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX) 290e5a81b62SSteven Rostedt return NULL; 291e5a81b62SSteven Rostedt 2922fc5f0cfSLi Zefan m->private = (void *)n; 2931b6cced6SSteven Rostedt return &m->private; 294e5a81b62SSteven Rostedt } 295e5a81b62SSteven Rostedt 2962fc5f0cfSLi Zefan static void * 2972fc5f0cfSLi Zefan t_next(struct seq_file *m, void *v, loff_t *pos) 2982fc5f0cfSLi Zefan { 2992fc5f0cfSLi Zefan (*pos)++; 3002fc5f0cfSLi Zefan return __next(m, pos); 3012fc5f0cfSLi Zefan } 3022fc5f0cfSLi Zefan 303e5a81b62SSteven Rostedt static void *t_start(struct seq_file *m, loff_t *pos) 304e5a81b62SSteven Rostedt { 305e5a81b62SSteven Rostedt local_irq_disable(); 3064f48f8b7SLai Jiangshan 3078aaf1ee7SSteven Rostedt (VMware) __this_cpu_inc(disable_stack_tracer); 3084f48f8b7SLai Jiangshan 309d332736dSSteven Rostedt (Red Hat) arch_spin_lock(&stack_trace_max_lock); 310e5a81b62SSteven Rostedt 311522a110bSLiming Wang if (*pos == 0) 312522a110bSLiming Wang return SEQ_START_TOKEN; 313522a110bSLiming Wang 3142fc5f0cfSLi Zefan return __next(m, pos); 315e5a81b62SSteven Rostedt } 316e5a81b62SSteven Rostedt 317e5a81b62SSteven Rostedt static void t_stop(struct seq_file *m, void *p) 318e5a81b62SSteven Rostedt { 319d332736dSSteven Rostedt (Red Hat) arch_spin_unlock(&stack_trace_max_lock); 3204f48f8b7SLai Jiangshan 3218aaf1ee7SSteven Rostedt (VMware) __this_cpu_dec(disable_stack_tracer); 3224f48f8b7SLai Jiangshan 323e5a81b62SSteven Rostedt local_irq_enable(); 324e5a81b62SSteven Rostedt } 325e5a81b62SSteven Rostedt 326962e3707SJoe Perches static void trace_lookup_stack(struct seq_file *m, long i) 327e5a81b62SSteven Rostedt { 3281b6cced6SSteven Rostedt unsigned long addr = stack_dump_trace[i]; 329e5a81b62SSteven Rostedt 330962e3707SJoe Perches seq_printf(m, "%pS\n", (void *)addr); 331e5a81b62SSteven Rostedt } 332e5a81b62SSteven Rostedt 333e447e1dfSSteven Rostedt static void print_disabled(struct seq_file *m) 334e447e1dfSSteven Rostedt { 335e447e1dfSSteven Rostedt seq_puts(m, "#\n" 336e447e1dfSSteven Rostedt "# Stack tracer disabled\n" 337e447e1dfSSteven Rostedt "#\n" 338e447e1dfSSteven Rostedt "# To enable the stack tracer, either add 'stacktrace' to the\n" 339e447e1dfSSteven Rostedt "# kernel command line\n" 340e447e1dfSSteven Rostedt "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n" 341e447e1dfSSteven Rostedt "#\n"); 342e447e1dfSSteven Rostedt } 343e447e1dfSSteven Rostedt 344e5a81b62SSteven Rostedt static int t_show(struct seq_file *m, void *v) 345e5a81b62SSteven Rostedt { 346522a110bSLiming Wang long i; 3471b6cced6SSteven Rostedt int size; 348e5a81b62SSteven Rostedt 349522a110bSLiming Wang if (v == SEQ_START_TOKEN) { 3501b6cced6SSteven Rostedt seq_printf(m, " Depth Size Location" 3511b6cced6SSteven Rostedt " (%d entries)\n" 3521b6cced6SSteven Rostedt " ----- ---- --------\n", 353bb99d8ccSAKASHI Takahiro stack_trace_max.nr_entries); 354e447e1dfSSteven Rostedt 355bb99d8ccSAKASHI Takahiro if (!stack_tracer_enabled && !stack_trace_max_size) 356e447e1dfSSteven Rostedt print_disabled(m); 357e447e1dfSSteven Rostedt 3581b6cced6SSteven Rostedt return 0; 3591b6cced6SSteven Rostedt } 3601b6cced6SSteven Rostedt 361522a110bSLiming Wang i = *(long *)v; 362522a110bSLiming Wang 363bb99d8ccSAKASHI Takahiro if (i >= stack_trace_max.nr_entries || 3641b6cced6SSteven Rostedt stack_dump_trace[i] == ULONG_MAX) 365e5a81b62SSteven Rostedt return 0; 366e5a81b62SSteven Rostedt 367bb99d8ccSAKASHI Takahiro if (i+1 == stack_trace_max.nr_entries || 3681b6cced6SSteven Rostedt stack_dump_trace[i+1] == ULONG_MAX) 369bb99d8ccSAKASHI Takahiro size = stack_trace_index[i]; 3701b6cced6SSteven Rostedt else 371bb99d8ccSAKASHI Takahiro size = stack_trace_index[i] - stack_trace_index[i+1]; 3721b6cced6SSteven Rostedt 373bb99d8ccSAKASHI Takahiro seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size); 3741b6cced6SSteven Rostedt 3751b6cced6SSteven Rostedt trace_lookup_stack(m, i); 376e5a81b62SSteven Rostedt 377e5a81b62SSteven Rostedt return 0; 378e5a81b62SSteven Rostedt } 379e5a81b62SSteven Rostedt 380f38f1d2aSSteven Rostedt static const struct seq_operations stack_trace_seq_ops = { 381e5a81b62SSteven Rostedt .start = t_start, 382e5a81b62SSteven Rostedt .next = t_next, 383e5a81b62SSteven Rostedt .stop = t_stop, 384e5a81b62SSteven Rostedt .show = t_show, 385e5a81b62SSteven Rostedt }; 386e5a81b62SSteven Rostedt 387e5a81b62SSteven Rostedt static int stack_trace_open(struct inode *inode, struct file *file) 388e5a81b62SSteven Rostedt { 389d8cc1ab7SLi Zefan return seq_open(file, &stack_trace_seq_ops); 390e5a81b62SSteven Rostedt } 391e5a81b62SSteven Rostedt 392f38f1d2aSSteven Rostedt static const struct file_operations stack_trace_fops = { 393e5a81b62SSteven Rostedt .open = stack_trace_open, 394e5a81b62SSteven Rostedt .read = seq_read, 395e5a81b62SSteven Rostedt .llseek = seq_lseek, 396d8cc1ab7SLi Zefan .release = seq_release, 397e5a81b62SSteven Rostedt }; 398e5a81b62SSteven Rostedt 399bbd1d27dSSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE 400bbd1d27dSSteven Rostedt (VMware) 401d2d45c7aSSteven Rostedt static int 402d2d45c7aSSteven Rostedt stack_trace_filter_open(struct inode *inode, struct file *file) 403d2d45c7aSSteven Rostedt { 4040f179765SSteven Rostedt (VMware) struct ftrace_ops *ops = inode->i_private; 4050f179765SSteven Rostedt (VMware) 4060f179765SSteven Rostedt (VMware) return ftrace_regex_open(ops, FTRACE_ITER_FILTER, 407d2d45c7aSSteven Rostedt inode, file); 408d2d45c7aSSteven Rostedt } 409d2d45c7aSSteven Rostedt 410d2d45c7aSSteven Rostedt static const struct file_operations stack_trace_filter_fops = { 411d2d45c7aSSteven Rostedt .open = stack_trace_filter_open, 412d2d45c7aSSteven Rostedt .read = seq_read, 413d2d45c7aSSteven Rostedt .write = ftrace_filter_write, 414098c879eSSteven Rostedt (Red Hat) .llseek = tracing_lseek, 415d2d45c7aSSteven Rostedt .release = ftrace_regex_release, 416d2d45c7aSSteven Rostedt }; 417d2d45c7aSSteven Rostedt 418bbd1d27dSSteven Rostedt (VMware) #endif /* CONFIG_DYNAMIC_FTRACE */ 419bbd1d27dSSteven Rostedt (VMware) 420f38f1d2aSSteven Rostedt int 421f38f1d2aSSteven Rostedt stack_trace_sysctl(struct ctl_table *table, int write, 4228d65af78SAlexey Dobriyan void __user *buffer, size_t *lenp, 423f38f1d2aSSteven Rostedt loff_t *ppos) 424f38f1d2aSSteven Rostedt { 425f38f1d2aSSteven Rostedt int ret; 426f38f1d2aSSteven Rostedt 427f38f1d2aSSteven Rostedt mutex_lock(&stack_sysctl_mutex); 428f38f1d2aSSteven Rostedt 4298d65af78SAlexey Dobriyan ret = proc_dointvec(table, write, buffer, lenp, ppos); 430f38f1d2aSSteven Rostedt 431f38f1d2aSSteven Rostedt if (ret || !write || 432a32c7765SLi Zefan (last_stack_tracer_enabled == !!stack_tracer_enabled)) 433f38f1d2aSSteven Rostedt goto out; 434f38f1d2aSSteven Rostedt 435a32c7765SLi Zefan last_stack_tracer_enabled = !!stack_tracer_enabled; 436f38f1d2aSSteven Rostedt 437f38f1d2aSSteven Rostedt if (stack_tracer_enabled) 438f38f1d2aSSteven Rostedt register_ftrace_function(&trace_ops); 439f38f1d2aSSteven Rostedt else 440f38f1d2aSSteven Rostedt unregister_ftrace_function(&trace_ops); 441f38f1d2aSSteven Rostedt 442f38f1d2aSSteven Rostedt out: 443f38f1d2aSSteven Rostedt mutex_unlock(&stack_sysctl_mutex); 444f38f1d2aSSteven Rostedt return ret; 445f38f1d2aSSteven Rostedt } 446f38f1d2aSSteven Rostedt 447762e1207SSteven Rostedt static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata; 448762e1207SSteven Rostedt 449f38f1d2aSSteven Rostedt static __init int enable_stacktrace(char *str) 450f38f1d2aSSteven Rostedt { 451762e1207SSteven Rostedt if (strncmp(str, "_filter=", 8) == 0) 452762e1207SSteven Rostedt strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE); 453762e1207SSteven Rostedt 454e05a43b7SSteven Rostedt stack_tracer_enabled = 1; 455e05a43b7SSteven Rostedt last_stack_tracer_enabled = 1; 456f38f1d2aSSteven Rostedt return 1; 457f38f1d2aSSteven Rostedt } 458f38f1d2aSSteven Rostedt __setup("stacktrace", enable_stacktrace); 459f38f1d2aSSteven Rostedt 460e5a81b62SSteven Rostedt static __init int stack_trace_init(void) 461e5a81b62SSteven Rostedt { 462e5a81b62SSteven Rostedt struct dentry *d_tracer; 463e5a81b62SSteven Rostedt 464e5a81b62SSteven Rostedt d_tracer = tracing_init_dentry(); 46514a5ae40SSteven Rostedt (Red Hat) if (IS_ERR(d_tracer)) 466ed6f1c99SNamhyung Kim return 0; 467e5a81b62SSteven Rostedt 4685452af66SFrederic Weisbecker trace_create_file("stack_max_size", 0644, d_tracer, 469bb99d8ccSAKASHI Takahiro &stack_trace_max_size, &stack_max_size_fops); 470e5a81b62SSteven Rostedt 4715452af66SFrederic Weisbecker trace_create_file("stack_trace", 0444, d_tracer, 472e5a81b62SSteven Rostedt NULL, &stack_trace_fops); 473e5a81b62SSteven Rostedt 474bbd1d27dSSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE 475d2d45c7aSSteven Rostedt trace_create_file("stack_trace_filter", 0444, d_tracer, 4760f179765SSteven Rostedt (VMware) &trace_ops, &stack_trace_filter_fops); 477bbd1d27dSSteven Rostedt (VMware) #endif 478d2d45c7aSSteven Rostedt 479762e1207SSteven Rostedt if (stack_trace_filter_buf[0]) 480762e1207SSteven Rostedt ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1); 481762e1207SSteven Rostedt 482e05a43b7SSteven Rostedt if (stack_tracer_enabled) 483e5a81b62SSteven Rostedt register_ftrace_function(&trace_ops); 484e5a81b62SSteven Rostedt 485e5a81b62SSteven Rostedt return 0; 486e5a81b62SSteven Rostedt } 487e5a81b62SSteven Rostedt 488e5a81b62SSteven Rostedt device_initcall(stack_trace_init); 489