#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. Otherwise, if local irqs are disabled
 * around the loop, we can deadlock: retry() would keep comparing the latest
 * value of the mems_allowed seqcount against 0, because begin() still sees
 * cpusets_enabled() as false. The enabled -> disabled transition should
 * happen in the reverse order for the same reason (we want retry() to stop
 * looking at the real value of mems_allowed.sequence first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

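/*
 * Enable order: bump the pre_enable key first so that begin() is rewritten
 * before retry(), as required by the ordering comment above.
 */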
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

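/*
 * Disable order is the reverse: retry() stops looking at the real
 * mems_allowed.sequence before begin() does.
 */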
static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
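
/*
 * Illustrative sketch only, not part of this interface: an allocator-style
 * scan that skips zones the current cpuset forbids. for_each_zone_zonelist()
 * is the iterator from <linux/mmzone.h>; the allocation step is a
 * hypothetical placeholder.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */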

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

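/*
 * Collect memory pressure data only when the per-cpuset collector has been
 * enabled; the flag check keeps the common (disabled) case cheap.
 */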
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures (see the usage sketch below read_mems_allowed_retry()).
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
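
/*
 * Typical usage, sketched here for illustration; try_alloc() stands in for
 * whatever nodemask-dependent operation is being retried and is not a real
 * helper in this header:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */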

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
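	/*
	 * The seqcount write section runs with irqs off: a reader in
	 * interrupt context on this CPU could otherwise spin forever on
	 * an odd sequence count.
	 */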
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */