// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

/* Maximum number of entries recorded for the deepest stack seen */
#define STACK_TRACE_ENTRIES 500

/* Return addresses of the deepest stack trace recorded so far */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
/* Depth in bytes (from the top of the stack) for each entry above */
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
/* Protects the max-stack record; only taken with IRQs disabled */
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-CPU recursion guard for the ftrace callback (see stack_trace_call) */
DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

/*
 * Dump the recorded maximum stack, one line per traced function,
 * showing depth, frame size, and symbol.  Used just before BUG()
 * when the stack end canary is found corrupted in check_stack().
 */
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		/* Last entry owns everything below it; others are deltas */
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * The stack tracer looks for a maximum stack at each call from a function. It
 * registers a callback from ftrace, and in that callback it examines the stack
 * size. It determines the stack size from the variable passed in, which is the
 * address of a local variable in the stack_trace_call() callback function.
 * The stack size is calculated by the address of the local variable to the top
 * of the current stack. If that size is smaller than the currently saved max
 * stack size, nothing more is done.
 *
 * If the size of the stack is greater than the maximum recorded size, then the
 * following algorithm takes place.
 *
 * For architectures (like x86) that store the function's return address before
 * saving the function's local variables, the stack will look something like
 * this:
 *
 *   [ top of stack ]
 *    0: sys call entry frame
 *   10: return addr to entry code
 *   11: start of sys_foo frame
 *   20: return addr to sys_foo
 *   21: start of kernel_func_bar frame
 *   30: return addr to kernel_func_bar
 *   31: [ do trace stack here ]
 *
 * The save_stack_trace() is called returning all the functions it finds in the
 * current stack. Which would be (from the bottom of the stack to the top):
 *
 *   return addr to kernel_func_bar
 *   return addr to sys_foo
 *   return addr to entry code
 *
 * Now to figure out how much each of these functions' local variable size is,
 * a search of the stack is made to find these values. When a match is made, it
 * is added to the stack_dump_trace[] array. The offset into the stack is saved
 * in the stack_trace_index[] array. The above example would show:
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *   return addr to kernel_func_bar |     30
 *   return addr to sys_foo         |     20
 *   return addr to entry           |     10
 *
 * The print_max_stack() function above, uses these values to print the size of
 * each function's portion of the stack.
 *
 *  for (i = 0; i < nr_entries; i++) {
 *     size = i == nr_entries - 1 ? stack_trace_index[i] :
 *                    stack_trace_index[i] - stack_trace_index[i+1]
 *     print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i]);
 *  }
 *
 * The above shows
 *
 *     depth size  location
 *     ----- ----  --------
 *  0    30   10   kernel_func_bar
 *  1    20   10   sys_foo
 *  2    10   10   entry code
 *
 * Now for architectures that might save the return address after the functions
 * local variables (saving the link register before calling nested functions),
 * this will cause the stack to look a little different:
 *
 *   [ top of stack ]
 *    0: sys call entry frame
 *   10: start of sys_foo_frame
 *   19: return addr to entry code << lr saved before calling kernel_func_bar
 *   20: start of kernel_func_bar frame
 *   29: return addr to sys_foo_frame << lr saved before calling next function
 *   30: [ do trace stack here ]
 *
 * Although the functions returned by save_stack_trace() may be the same, the
 * placement in the stack will be different. Using the same algorithm as above
 * would yield:
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *   return addr to kernel_func_bar |     30
 *   return addr to sys_foo         |     29
 *   return addr to entry           |     19
 *
 * Where the mapping is off by one:
 *
 *   kernel_func_bar stack frame size is 29 - 19 not 30 - 29!
 *
 * To fix this, if the architecture defines ARCH_FTRACE_SHIFT_STACK_TRACER, the
 * values in stack_trace_index[] are shifted by one and the number of
 * stack trace entries is decremented by one.
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *   return addr to kernel_func_bar |     29
 *   return addr to sys_foo         |     19
 *
 * Although the entry function is not displayed, the first function (sys_foo)
 * will still include the stack size of it.
 */
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	/* Offset of 'stack' within the thread stack, then depth from the top */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	/* Fast path: not a new maximum, nothing to record */
	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	/* stack_trace_max_lock is only ever taken with IRQs disabled */
	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					ARRAY_SIZE(stack_dump_trace) - 1,
					0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		/* Scan the stack for the return address of entry i */
		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		/* Entry not on the stack; drop it and keep searching */
		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	/* Dump what we have and die if the stack canary was clobbered */
	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif

/*
 * ftrace callback, invoked at traced function entry.  Takes the address
 * of the local 'stack' variable as the current stack depth probe and
 * hands it to check_stack() to record a possible new maximum.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	/* Point past the mcount call site in the traced function */
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

/* Read handler for the stack_max_size tracefs file: value as decimal */
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

/* Write handler for stack_max_size: parse a decimal and store it */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

/*
 * Map a seq_file position onto an index into stack_dump_trace[].
 * Position 0 is the header token, so entry n corresponds to *pos - 1.
 * Returns NULL once past the recorded entries.
 */
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

/*
 * seq_file start: disable IRQs and the tracer on this CPU, then take
 * the max-stack lock for the whole iteration (released in t_stop()),
 * so the record cannot change while it is being printed.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

/* seq_file stop: undo t_start() in reverse order */
static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

/* Print the symbol for entry i of the recorded trace */
static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

/* seq_file show: header for the start token, otherwise one trace entry */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	/* Last entry owns the remaining depth; others are deltas */
	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Sysctl handler for /proc/sys/kernel/stack_tracer_enabled.
 * Registers or unregisters the ftrace callback when the value
 * actually changes; no-op on reads or unchanged writes.
 */
int
stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		   size_t *lenp, loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/*
 * Handle the "stacktrace" kernel command line parameter; an optional
 * "_filter=" suffix saves a filter applied at init time.
 */
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

/* Create the tracefs files and start the tracer if enabled at boot */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);