// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>
#include <linux/rethook.h>

#include <asm/paca.h>

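/*
 * Generic stack walker: start from @regs, the current frame, or the
 * task's saved stack pointer, then follow the back chain from frame
 * to frame, feeding each saved LR to @consume_entry.
 */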
void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
					   struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

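	/*
	 * Choose the starting stack pointer: r1 from the saved regs if
	 * we have them, the current frame when walking ourselves, or
	 * the task's saved kernel SP for a scheduled-out task.
	 */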
	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

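	/*
	 * Each frame holds the back chain pointer to the previous
	 * frame at stack[0] and a saved LR (return address) at the
	 * fixed ABI offset STACK_FRAME_LR_SAVE.
	 */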
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;

	// See copy_thread() for details.
	if (task->flags & PF_KTHREAD)
		stack_end -= STACK_FRAME_MIN_SIZE;
	else
		stack_end -= STACK_USER_INT_FRAME_SIZE;

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

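	/*
	 * The starting SP must lie within the task's stack and leave
	 * room above it for at least a minimal frame.
	 */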
	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be 16-byte aligned. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized. Continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code; they are generic.
		 */
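		/*
		 * ftrace_graph_ret_addr() maps the graph tracer's
		 * return trampoline back to the original return
		 * address, so graph-traced frames remain resolvable.
		 */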
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);

		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
#ifdef CONFIG_RETHOOK
		if (ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
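/*
 * Print the backtrace of the interrupted CPU; called via the backtrace
 * NMI IPI, or directly (with NULL regs) for the local CPU.
 */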
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// Now wait up to 5s for the other CPU to do its backtrace
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// Other CPU cleared itself from the mask
			if (delay_us)
				continue;
		}

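		// The IPI could not be sent, or the CPU never cleared
		// itself from the mask: inspect its paca from here.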
		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */