#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H


#include <linux/sched.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer, used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq; otherwise the
	 * order is used only for display purposes.
	 */
	const int order;
};
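
/*
 * Illustrative sketch, not part of the original interface: how an
 * allocation path might describe a failed allocation for the OOM killer.
 * The helper name is an assumption made for this example only.
 */
static inline struct oom_control example_oom_control(struct zonelist *zonelist,
						     nodemask_t *nodemask,
						     gfp_t gfp_mask, int order)
{
	struct oom_control oc = {
		.zonelist = zonelist,	/* cpuset constraint detection */
		.nodemask = nodemask,	/* mempolicy constraint detection */
		.gfp_mask = gfp_mask,	/* allocation context */
		.order	  = order,	/* -1 only for forced (sysrq) kills */
	};

	return oc;
}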

/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};
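
/*
 * Illustrative sketch (assumption, not from the original header): how a
 * victim-selection loop might act on the oom_scan_t verdict for a single
 * candidate task.  "chosen" is a hypothetical local of the caller; a true
 * return value means the whole scan should be aborted.
 */
static inline bool example_handle_scan_verdict(enum oom_scan_t verdict,
					       struct task_struct *task,
					       struct task_struct **chosen)
{
	switch (verdict) {
	case OOM_SCAN_SELECT:
		*chosen = task;		/* pick this task unconditionally */
		return false;
	case OOM_SCAN_CONTINUE:
		return false;		/* skip this task */
	case OOM_SCAN_ABORT:
		return true;		/* stop scanning entirely */
	case OOM_SCAN_OK:
		break;			/* fall through to badness scoring */
	}
	return false;
}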

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
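
/*
 * Illustrative sketch (assumption, not part of the original interface): a
 * task that allocates a large amount of memory on behalf of the system
 * (for example while enabling swap) can mark itself as the preferred OOM
 * victim for the duration of the operation.  The callback is hypothetical.
 */
static inline int example_with_oom_origin(int (*do_large_alloc)(void *data),
					  void *data)
{
	int ret;

	set_current_oom_origin();
	ret = do_large_alloc(data);
	clear_current_oom_origin();

	return ret;
}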

extern void mark_oom_victim(struct task_struct *tsk);

#ifdef CONFIG_MMU
extern void try_oom_reaper(struct task_struct *tsk);
#else
static inline void try_oom_reaper(struct task_struct *tsk)
{
}
#endif

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);
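
/*
 * Illustrative sketch (assumption): scoring one candidate task against the
 * current best choice.  A higher badness score means a better kill
 * candidate; "chosen" and "chosen_points" are hypothetical locals of the
 * selection loop.
 */
static inline void example_score_candidate(struct oom_control *oc,
					   struct task_struct *p,
					   unsigned long totalpages,
					   struct task_struct **chosen,
					   unsigned long *chosen_points)
{
	unsigned long points;

	points = oom_badness(p, NULL, oc->nodemask, totalpages);
	if (points > *chosen_points) {
		*chosen = p;
		*chosen_points = points;
	}
}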

extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
			     unsigned int points, unsigned long totalpages,
			     struct mem_cgroup *memcg, const char *message);

extern void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint,
			       struct mem_cgroup *memcg);

extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
		struct task_struct *task, unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);
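
/*
 * Illustrative sketch (assumption, not from the original header): callers
 * such as the page allocator serialize OOM kills with oom_lock and skip
 * the kill when another task is already handling one.  Assumes
 * <linux/mutex.h> is available at the point of use.
 */
static inline bool example_try_oom_kill(struct oom_control *oc)
{
	bool killed;

	if (!mutex_trylock(&oom_lock))
		return false;		/* an OOM kill is already in flight */

	killed = out_of_memory(oc);
	mutex_unlock(&oom_lock);

	return killed;
}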

extern void exit_oom_victim(struct task_struct *tsk);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
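
/*
 * Illustrative sketch (assumption, for a .c file that includes
 * <linux/notifier.h>; guarded out here because this header only forward
 * declares struct notifier_block): an OOM notifier runs before a kill is
 * attempted and adds the number of pages it freed to *parm.
 */
#if 0
static int example_oom_notify(struct notifier_block *nb,
			      unsigned long unused, void *parm)
{
	unsigned long *freed = parm;

	*freed += 0;	/* add pages reclaimed by this subsystem here */
	return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
	.notifier_call = example_oom_notify,
};

/* register_oom_notifier(&example_oom_nb); */
#endif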

extern bool oom_killer_disabled;
extern bool oom_killer_disable(void);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

static inline bool task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (!(task->flags & PF_EXITING))
		return false;

	/* Make sure that the whole thread group is going down */
	if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT))
		return false;

	return true;
}
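
/*
 * Illustrative sketch (assumption, not from the original header): before
 * selecting a victim, the OOM killer can check whether current is already
 * exiting and will release its memory on its own; if so it is simply
 * marked as the victim and no new kill is needed.
 */
static inline bool example_current_will_free_mem(void)
{
	if (!task_will_free_mem(current))
		return false;

	mark_oom_victim(current);
	return true;
}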

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */