xref: /linux-6.15/include/linux/cpuset.h (revision 3f07c014)
1 #ifndef _LINUX_CPUSET_H
2 #define _LINUX_CPUSET_H
3 /*
4  *  cpuset interface
5  *
6  *  Copyright (C) 2003 BULL SA
7  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
8  *
9  */
10 
11 #include <linux/sched.h>
12 #include <linux/sched/topology.h>
13 #include <linux/cpumask.h>
14 #include <linux/nodemask.h>
15 #include <linux/mm.h>
16 #include <linux/jump_label.h>
17 
18 #ifdef CONFIG_CPUSETS
19 
20 extern struct static_key_false cpusets_enabled_key;
/*
 * Fast static-branch test: true once cpusets_enabled_key has been
 * enabled via cpuset_inc() (i.e. at least one extra cpuset exists;
 * see nr_cpusets() for the accounting).
 */
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}
25 
/* Total number of cpusets in the system, derived from the static key. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}
31 
/* Take a reference on the cpusets static key (a cpuset was created). */
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}
36 
/* Drop a reference on the cpusets static key (a cpuset was destroyed). */
static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}
41 
42 extern int cpuset_init(void);
43 extern void cpuset_init_smp(void);
44 extern void cpuset_update_active_cpus(bool cpu_online);
45 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
46 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
47 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
48 #define cpuset_current_mems_allowed (current->mems_allowed)
49 void cpuset_init_current_mems_allowed(void);
50 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
51 
52 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
53 
54 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
55 {
56 	if (cpusets_enabled())
57 		return __cpuset_node_allowed(node, gfp_mask);
58 	return true;
59 }
60 
/* Zone-based variant: delegates to __cpuset_node_allowed() on the zone's node. */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
65 
66 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
67 {
68 	if (cpusets_enabled())
69 		return __cpuset_zone_allowed(z, gfp_mask);
70 	return true;
71 }
72 
73 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
74 					  const struct task_struct *tsk2);
75 
/*
 * Record a memory-pressure event: call __cpuset_memory_pressure_bump()
 * only when the global cpuset_memory_pressure_enabled flag is set, so
 * the common (disabled) case costs just one branch.
 */
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
83 
84 extern void cpuset_task_status_allowed(struct seq_file *m,
85 					struct task_struct *task);
86 extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
87 			    struct pid *pid, struct task_struct *tsk);
88 
89 extern int cpuset_mem_spread_node(void);
90 extern int cpuset_slab_spread_node(void);
91 
/* Non-zero when current has page-cache spreading enabled; see task_spread_page(). */
static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}
96 
/* Non-zero when current has slab spreading enabled; see task_spread_slab(). */
static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
101 
102 extern int current_cpuset_is_being_rebound(void);
103 
104 extern void rebuild_sched_domains(void);
105 
106 extern void cpuset_print_current_mems_allowed(void);
107 
108 /*
109  * read_mems_allowed_begin is required when making decisions involving
110  * mems_allowed such as during page allocation. mems_allowed can be updated in
111  * parallel and depending on the new value an operation can fail potentially
112  * causing process failure. A retry loop with read_mems_allowed_begin and
113  * read_mems_allowed_retry prevents these artificial failures.
114  */
115 static inline unsigned int read_mems_allowed_begin(void)
116 {
117 	if (!cpusets_enabled())
118 		return 0;
119 
120 	return read_seqcount_begin(&current->mems_allowed_seq);
121 }
122 
123 /*
124  * If this returns true, the operation that took place after
125  * read_mems_allowed_begin may have failed artificially due to a concurrent
126  * update of mems_allowed. It is up to the caller to retry the operation if
127  * appropriate.
128  */
129 static inline bool read_mems_allowed_retry(unsigned int seq)
130 {
131 	if (!cpusets_enabled())
132 		return false;
133 
134 	return read_seqcount_retry(&current->mems_allowed_seq, seq);
135 }
136 
/*
 * Install a new mems_allowed nodemask on current.
 *
 * The assignment is wrapped in current->mems_allowed_seq so that
 * lockless readers (read_mems_allowed_begin/retry) detect a concurrent
 * update and retry.  Interrupts are disabled across the write section
 * -- NOTE(review): presumably so a reader entered from irq context on
 * this CPU cannot spin on an in-progress write; confirm against the
 * seqcount writer rules.  task_lock serializes against other writers.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
149 
150 #else /* !CONFIG_CPUSETS */
151 
/* With CONFIG_CPUSETS=n there are never any cpusets. */
static inline bool cpusets_enabled(void)
{
	return false;
}
153 
/* Nothing to initialize without cpusets; report success. */
static inline int cpuset_init(void)
{
	return 0;
}
/* No SMP-phase cpuset state to set up without CONFIG_CPUSETS. */
static inline void cpuset_init_smp(void)
{
}
156 
/*
 * Without cpusets there is only one sched-domain partition; rebuild it
 * with default attributes and ignore which CPU went on/offline
 * (cpu_online is unused here).
 */
static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}
161 
/* Without cpusets a task may run on any possible CPU; @p is unused. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
167 
/* No cpuset-imposed restriction exists to fall back from: nothing to do. */
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}
171 
/* Without cpusets every possible node is allowed; @p is unused. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
176 
/* Allowed nodes default to every node that has memory. */
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
179 
/* Any nodemask is acceptable when cpusets are compiled out. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
184 
/* All nodes pass the allowed check when cpusets are compiled out. */
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}
189 
/* All zones pass the allowed check when cpusets are compiled out. */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}
194 
/* All zones pass the allowed check when cpusets are compiled out. */
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}
199 
/* All tasks share the same full mems_allowed, so they always intersect. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
205 
/* Memory-pressure accounting does not exist without cpusets. */
static inline void cpuset_memory_pressure_bump(void) {}
207 
/* Nothing cpuset-related to show in /proc status output. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}
212 
/* No page spreading without cpusets: always report node 0. */
static inline int cpuset_mem_spread_node(void) { return 0; }
217 
/* No slab spreading without cpusets: always report node 0. */
static inline int cpuset_slab_spread_node(void) { return 0; }
222 
/* Page spreading is never enabled when cpusets are compiled out. */
static inline int cpuset_do_page_mem_spread(void) { return 0; }
227 
/* Slab spreading is never enabled when cpusets are compiled out. */
static inline int cpuset_do_slab_mem_spread(void) { return 0; }
232 
/* A cpuset rebind can never be in progress without CONFIG_CPUSETS. */
static inline int current_cpuset_is_being_rebound(void) { return 0; }
237 
/* Rebuild the single default sched-domain partition (no cpuset topology). */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
242 
/* No cpuset context exists to print without CONFIG_CPUSETS. */
static inline void cpuset_print_current_mems_allowed(void) {}
246 
/* mems_allowed is fixed without cpusets; the requested mask is ignored. */
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
250 
/* mems_allowed cannot change without cpusets; 0 is a dummy token. */
static inline unsigned int read_mems_allowed_begin(void) { return 0; }
255 
/* Never retry: no concurrent mems_allowed update is possible. */
static inline bool read_mems_allowed_retry(unsigned int seq) { return false; }
260 
261 #endif /* !CONFIG_CPUSETS */
262 
263 #endif /* _LINUX_CPUSET_H */
264