xref: /linux-6.15/include/linux/oom.h (revision e00a844a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;
/*
 * Details of the page allocation that triggered the oom killer, used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill was requested via sysrq; any other
	 * value is used only for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;
};

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
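
/*
 * Illustrative sketch (not part of this header): the oom_origin helpers mark
 * the current task as the preferred OOM victim while it performs a memory
 * hungry operation, similar in spirit to what swapoff does around
 * try_to_unuse(). example_drain_swap() is a hypothetical callee; the
 * bracketing pattern is the point.
 */
#if 0
static int example_reclaim_heavy_path(void)
{
	int err;

	set_current_oom_origin();	/* prefer us if the OOM killer runs */
	err = example_drain_swap();	/* hypothetical memory-hungry work */
	clear_current_oom_origin();	/* back to normal victim selection */

	return err;
}
#endif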

static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
	return tsk->signal->oom_mm;
}
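
/*
 * Illustrative sketch (not part of this header): allocation or charge paths
 * can use tsk_is_oom_victim() to avoid throttling a task that has already
 * been selected by the OOM killer and will release its memory shortly.
 * The surrounding helper is hypothetical.
 */
#if 0
static bool example_should_bypass_limit(void)
{
	/* An OOM victim or a fatally signalled task should not be made to wait. */
	return tsk_is_oom_victim(current) || fatal_signal_pending(current);
}
#endif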

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer the case once the oom reaper has started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag being set on
 * the mm. From that moment on, any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the original
 * content).
 *
 * Callers should invoke this before establishing a page table entry for
 * a !shared mapping, and while holding the proper page table lock.
 *
 * Return 0 when the page fault is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline int check_stable_address_space(struct mm_struct *mm)
{
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}
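
/*
 * Illustrative sketch (not part of this header): a page fault handler calls
 * check_stable_address_space() under the page table lock before installing a
 * PTE for a private mapping, loosely modelled on do_anonymous_page().
 * example_install_pte() and the pre-built 'entry' are simplified assumptions.
 */
#if 0
static int example_install_pte(struct vm_fault *vmf, pte_t entry)
{
	struct vm_area_struct *vma = vmf->vma;
	int ret;

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
				       &vmf->ptl);

	/* Bail out if the oom reaper may already have unmapped this range. */
	ret = check_stable_address_space(vma->vm_mm);
	if (ret)
		goto unlock;

	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
unlock:
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return ret;
}
#endif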

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);
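
/*
 * Illustrative sketch (not part of this header): oom_badness() is the scoring
 * heart of victim selection; the OOM killer walks eligible tasks and keeps
 * the one with the highest score. pick_heaviest_task() is a simplified,
 * hypothetical stand-in for that selection loop.
 */
#if 0
static struct task_struct *pick_heaviest_task(struct oom_control *oc)
{
	struct task_struct *p, *heaviest = NULL;
	unsigned long max_points = 0;

	rcu_read_lock();
	for_each_process(p) {
		unsigned long points;

		points = oom_badness(p, oc->memcg, oc->nodemask,
				     oc->totalpages);
		if (points <= max_points)
			continue;
		/* Pin the new candidate so it stays valid after the walk. */
		if (heaviest)
			put_task_struct(heaviest);
		get_task_struct(p);
		heaviest = p;
		max_points = points;
	}
	rcu_read_unlock();

	return heaviest;
}
#endif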

extern bool out_of_memory(struct oom_control *oc);
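
/*
 * Illustrative sketch (not part of this header): forcing an OOM kill by
 * filling in an oom_control and calling out_of_memory() under oom_lock,
 * loosely modelled on the SysRq handler. order == -1 marks the kill as
 * explicitly requested rather than driven by a failing allocation.
 */
#if 0
static void example_force_oom_kill(void)
{
	const gfp_t gfp_mask = GFP_KERNEL;
	struct oom_control oc = {
		.zonelist	= node_zonelist(first_memory_node, gfp_mask),
		.nodemask	= NULL,
		.memcg		= NULL,
		.gfp_mask	= gfp_mask,
		.order		= -1,
	};

	mutex_lock(&oom_lock);
	if (!out_of_memory(&oc))
		pr_info("OOM request ignored, no eligible task found\n");
	mutex_unlock(&oom_lock);
}
#endif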

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
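
/*
 * Illustrative sketch (not part of this header): an OOM notifier gets a
 * chance to release memory before a victim is killed. The callback receives
 * a pointer to a running count of freed pages and adds whatever it could
 * give back. example_shrink_cache() is a hypothetical reclaim hook.
 */
#if 0
static int example_oom_notify(struct notifier_block *nb, unsigned long dummy,
			      void *parm)
{
	unsigned long *freed = parm;

	*freed += example_shrink_cache();	/* hypothetical: pages released */
	return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
	.notifier_call = example_oom_notify,
};

static int __init example_init(void)
{
	return register_oom_notifier(&example_oom_nb);
}

static void __exit example_exit(void)
{
	unregister_oom_notifier(&example_oom_nb);
}
#endif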

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
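
/*
 * Illustrative sketch (not part of this header): suspend/hibernation style
 * quiescing. oom_killer_disable() waits (up to the given timeout, in jiffies)
 * for existing OOM victims to exit and prevents new kills; it must be paired
 * with oom_killer_enable(). The 20 second timeout is an arbitrary example.
 */
#if 0
static int example_quiesce_oom_killer(void)
{
	if (!oom_killer_disable(msecs_to_jiffies(20 * MSEC_PER_SEC)))
		return -EBUSY;	/* a victim failed to exit in time */

	/* ... work that must not race with the OOM killer ... */

	oom_killer_enable();
	return 0;
}
#endif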

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */