#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

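/*
 * A jump label tracks whether any cpuset beyond the root has been
 * created, so the common no-cpusets case stays cheap. Note that
 * static_key_false() below is the older jump-label API: it tests a key
 * that defaults to disabled (the branch is compiled as unlikely); it
 * does not force a false result.
 */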
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

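/*
 * Example (illustrative sketch, not a caller in this header): hot paths
 * can gate cpuset work on cpusets_enabled() so that, while no cpuset
 * has been created, the whole check patches down to a no-op branch:
 *
 *	if (cpusets_enabled() &&
 *	    !cpuset_node_allowed_softwall(nid, gfp_mask))
 *		continue;
 */
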
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
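
/*
 * While nr_cpusets() <= 1 only the root cpuset exists, which allows all
 * memory nodes, so the wrappers above short-circuit to "allowed" and
 * skip the out-of-line __cpuset_node_allowed_*() slow paths entirely.
 *
 * Example (hedged sketch; a hypothetical caller walking a zonelist with
 * the usual mm iterator):
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		...
 *	}
 */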

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

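/*
 * cpuset_memory_pressure_bump() is a macro so the enabled test is
 * inlined at every call site; the out-of-line
 * __cpuset_memory_pressure_bump() only runs when per-cpuset memory
 * pressure accounting has been switched on.
 */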
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing the process to fail. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
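
/*
 * Typical usage (illustrative sketch; try_to_allocate() stands in for a
 * real allocation attempt and is hypothetical):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_to_allocate(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */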
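/*
 * Writer side of the mems_allowed seqcount. Interrupts are disabled
 * across the write section so that a reader entered from interrupt
 * context on this CPU cannot spin forever on a seqcount we are holding;
 * task_lock() serializes writers for the task.
 */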
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */