xref: /linux-6.15/arch/s390/kernel/stacktrace.c (revision b9be1bee)
1a17ae4c3SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
25bdc9b44SHeiko Carstens /*
35bdc9b44SHeiko Carstens  * Stack trace management functions
45bdc9b44SHeiko Carstens  *
5a53c8fabSHeiko Carstens  *  Copyright IBM Corp. 2006
65bdc9b44SHeiko Carstens  */
75bdc9b44SHeiko Carstens 
8ebd912ffSHeiko Carstens #include <linux/perf_event.h>
95bdc9b44SHeiko Carstens #include <linux/stacktrace.h>
10aa44433aSHeiko Carstens #include <linux/uaccess.h>
11aa44433aSHeiko Carstens #include <linux/compat.h>
12*b9be1beeSHeiko Carstens #include <asm/asm-offsets.h>
1378c98f90SMartin Schwidefsky #include <asm/stacktrace.h>
1478c98f90SMartin Schwidefsky #include <asm/unwind.h>
15aa137a6dSMiroslav Benes #include <asm/kprobes.h>
16aa44433aSHeiko Carstens #include <asm/ptrace.h>
1766adce8fSHeiko Carstens 
/*
 * arch_stack_walk() - walk the kernel stack frame by frame, feeding each
 * return address to @consume_entry.
 *
 * @consume_entry: callback invoked once per return address
 * @cookie:	   opaque pointer passed through to @consume_entry
 * @task:	   task whose stack is unwound
 * @regs:	   optional register set to start the unwind from
 *
 * The walk terminates when the unwinder runs out of frames, when a frame
 * yields no return address, or when the consumer declines further entries.
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;

	unwind_for_each_frame(&state, task, regs, 0) {
		unsigned long ip = unwind_get_return_address(&state);

		if (!ip)
			break;
		if (!consume_entry(cookie, ip))
			break;
	}
}
30aa137a6dSMiroslav Benes 
/*
 * arch_stack_walk_reliable() - like arch_stack_walk(), but fail with
 * -EINVAL whenever the trace cannot be guaranteed complete and accurate.
 * Any frame that is not a plain task-stack frame, any pt_regs frame,
 * any missing return address, and any unwinder error all make the
 * trace unreliable.
 *
 * Returns 0 when the full stack was walked reliably, -EINVAL otherwise.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		/* Frames on any stack other than the task stack are unreliable. */
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		/*
		 * A frame carrying a pt_regs pointer indicates an
		 * interrupt/exception boundary - unreliable.
		 */
		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		/* Consumer refused the entry: treat as an incomplete trace. */
		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}
66aa44433aSHeiko Carstens 
/*
 * store_ip() - record one instruction pointer either into a perf
 * callchain buffer (@perf true) or via the generic stacktrace
 * consumer callback (@perf false).
 *
 * Returns true when the caller should keep walking, false to stop.
 */
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	/* perf_callchain_store() returns non-zero when the buffer is full. */
	if (perf)
		return perf_callchain_store(entry, ip) == 0;
#endif
	return consume_entry(cookie, ip);
}
80ebd912ffSHeiko Carstens 
ip_invalid(unsigned long ip)81cd581092SHeiko Carstens static inline bool ip_invalid(unsigned long ip)
82cd581092SHeiko Carstens {
83cd581092SHeiko Carstens 	/*
84cd581092SHeiko Carstens 	 * Perform some basic checks if an instruction address taken
85cd581092SHeiko Carstens 	 * from unreliable source is invalid.
86cd581092SHeiko Carstens 	 */
87cd581092SHeiko Carstens 	if (ip & 1)
88cd581092SHeiko Carstens 		return true;
89cd581092SHeiko Carstens 	if (ip < mmap_min_addr)
90cd581092SHeiko Carstens 		return true;
91cd581092SHeiko Carstens 	if (ip >= current->mm->context.asce_limit)
92cd581092SHeiko Carstens 		return true;
93cd581092SHeiko Carstens 	return false;
94cd581092SHeiko Carstens }
95cd581092SHeiko Carstens 
ip_within_vdso(unsigned long ip)9662b672c4SHeiko Carstens static inline bool ip_within_vdso(unsigned long ip)
9762b672c4SHeiko Carstens {
9862b672c4SHeiko Carstens 	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
9962b672c4SHeiko Carstens }
10062b672c4SHeiko Carstens 
/*
 * arch_stack_walk_user_common() - walk a user space stack by following
 * the s390 ABI back-chain, delivering each return address via store_ip()
 * (perf callchain buffer when @perf, otherwise @consume_entry).
 *
 * Runs with page faults disabled, so unreadable user memory simply
 * terminates the walk instead of faulting.
 */
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	/* Compat (31-bit) tasks use a different frame layout - not handled here. */
	if (is_compat_task())
		return;
	/* Kernel threads have no user address space to walk. */
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			/* Return address is saved in gprs[8] slot, i.e. r14. */
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}
160cae74ba8SSven Schnelle 
/*
 * arch_stack_walk_user() - generic stacktrace entry point for user
 * stacks; delegates to the common walker with no perf callchain
 * buffer (@entry == NULL, @perf == false).
 */
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
166