xref: /linux-6.15/include/linux/sched/task_stack.h (revision fbe76a65)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
268db0cf1SIngo Molnar #ifndef _LINUX_SCHED_TASK_STACK_H
368db0cf1SIngo Molnar #define _LINUX_SCHED_TASK_STACK_H
468db0cf1SIngo Molnar 
5f3ac6067SIngo Molnar /*
6f3ac6067SIngo Molnar  * task->stack (kernel stack) handling interfaces:
7f3ac6067SIngo Molnar  */
8f3ac6067SIngo Molnar 
968db0cf1SIngo Molnar #include <linux/sched.h>
1050d34394SIngo Molnar #include <linux/magic.h>
11f6120d52SKent Overstreet #include <linux/refcount.h>
1268db0cf1SIngo Molnar 
13f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
14f3ac6067SIngo Molnar 
15f3ac6067SIngo Molnar /*
16f3ac6067SIngo Molnar  * When accessing the stack of a non-current task that might exit, use
17f3ac6067SIngo Molnar  * try_get_task_stack() instead.  task_stack_page will return a pointer
18f3ac6067SIngo Molnar  * that could get freed out from under you.
19f3ac6067SIngo Molnar  */
/* Return the base address of @task's kernel-stack allocation. */
static __always_inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}
24f3ac6067SIngo Molnar 
/* thread_info lives inside task_struct here, so there is nothing to copy. */
#define setup_thread_stack(new,old)	do { } while(0)
26f3ac6067SIngo Molnar 
/*
 * end_of_stack - address of the last usable long on @task's stack,
 * where the STACK_END_MAGIC sentinel is placed.
 *
 * With thread_info embedded in task_struct the whole allocation is
 * stack: the sentinel is the topmost long when the stack grows up,
 * the bottommost long (the stack base) otherwise.
 */
static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
#else
	return task->stack;
#endif
}
35f3ac6067SIngo Molnar 
36f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS)
37f3ac6067SIngo Molnar 
38f3ac6067SIngo Molnar #define task_stack_page(task)	((void *)(task)->stack)
39f3ac6067SIngo Molnar 
/*
 * Copy the parent's thread_info (which sits at the base of the stack in
 * this configuration) into the child's stack, then repoint ->task at the
 * new task_struct so the copy refers to the child, not the parent.
 */
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}
45f3ac6067SIngo Molnar 
46f3ac6067SIngo Molnar /*
47f3ac6067SIngo Molnar  * Return the address of the last usable long on the stack.
48f3ac6067SIngo Molnar  *
49f3ac6067SIngo Molnar  * When the stack grows down, this is just above the thread
50f3ac6067SIngo Molnar  * info struct. Going any lower will corrupt the threadinfo.
51f3ac6067SIngo Molnar  *
52f3ac6067SIngo Molnar  * When the stack grows up, this is the highest address.
53f3ac6067SIngo Molnar  * Beyond that position, we corrupt data on the next page.
54f3ac6067SIngo Molnar  */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	/* Highest long in the allocation, just below the next page. */
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	/* First long immediately above the thread_info struct. */
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
63f3ac6067SIngo Molnar 
64f3ac6067SIngo Molnar #endif
65f3ac6067SIngo Molnar 
66f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * try_get_task_stack - pin @tsk's stack against concurrent freeing.
 *
 * Takes a reference on tsk->stack_refcount and returns the stack base,
 * or NULL if the refcount already dropped to zero (stack freed/being
 * freed). A successful call must be paired with put_task_stack().
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return refcount_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

/* Drop a reference obtained via try_get_task_stack(). */
extern void put_task_stack(struct task_struct *tsk);
74f3ac6067SIngo Molnar #else
/*
 * Without CONFIG_THREAD_INFO_IN_TASK the stack is not refcounted:
 * "getting" it cannot fail and "putting" it is a no-op.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
81f3ac6067SIngo Molnar #endif
82f3ac6067SIngo Molnar 
/* NOTE(review): presumably reverses per-task stack accounting on exit —
 * confirm against the definition in kernel/fork.c. */
void exit_task_stack_account(struct task_struct *tsk);
841a03d3f1SSebastian Andrzej Siewior 
/*
 * Non-zero when the STACK_END_MAGIC sentinel at end_of_stack() has been
 * overwritten, i.e. the stack overflowed into its last usable word.
 */
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)
87f3ac6067SIngo Molnar 
8800ef0ef2SSascha Hauer static inline int object_is_on_stack(const void *obj)
89f3ac6067SIngo Molnar {
90f3ac6067SIngo Molnar 	void *stack = task_stack_page(current);
91f3ac6067SIngo Molnar 
92f3ac6067SIngo Molnar 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
93f3ac6067SIngo Molnar }
94f3ac6067SIngo Molnar 
95f3ac6067SIngo Molnar extern void thread_stack_cache_init(void);
96f3ac6067SIngo Molnar 
#ifdef CONFIG_DEBUG_STACK_USAGE
/* NOTE(review): presumably the count of stack bytes @p never touched —
 * confirm against the out-of-line definition. */
unsigned long stack_not_used(struct task_struct *p);
#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
	/* Stack-usage tracking disabled: report zero unused bytes. */
	return 0;
}
#endif
/* Plant the sentinel checked by task_stack_end_corrupted() above. */
extern void set_task_stack_end_magic(struct task_struct *tsk);
106f3ac6067SIngo Molnar 
1079049863aSIngo Molnar #ifndef __HAVE_ARCH_KSTACK_END
/*
 * kstack_end - true when @addr has reached the end of the stack area.
 *
 * @addr is first rounded up to a sizeof(void *) boundary (tolerating a
 * misaligned stack), then tested for landing on a THREAD_SIZE boundary:
 * the masked bits are all zero only at the edge of the stack allocation.
 */
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
1159049863aSIngo Molnar #endif
1169049863aSIngo Molnar 
11768db0cf1SIngo Molnar #endif /* _LINUX_SCHED_TASK_STACK_H */
118