#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

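/*
 * Illustrative sketch, not part of this header: because
 * cpusets_enabled() is backed by a static key, a hot path can make the
 * common "no cpusets configured" case a patched-out branch, e.g.:
 *
 *	if (cpusets_enabled() &&
 *	    !cpuset_node_allowed_softwall(nid, gfp_mask))
 *		continue;	(hypothetical caller: skip this node)
 *
 * cpuset_inc()/cpuset_dec() are meant to be paired by the cpuset core
 * as non-root cpusets come and go, toggling the key on and off.
 */
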
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

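/*
 * Illustrative sketch, not part of this header: a zonelist walker in a
 * page allocator might consult the softwall check before taking pages
 * from a zone (all locals here are hypothetical):
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *		if (cpusets_enabled() &&
 *		    !cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 *
 * The nr_cpusets() <= 1 short-circuit above keeps these wrappers cheap
 * while only the top-level cpuset exists.
 */
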
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

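/*
 * Illustrative sketch, not part of this header: the natural call site
 * for cpuset_memory_pressure_bump() is an allocation slow path, so the
 * per-cpuset pressure gauge only moves when memory was hard to find
 * (both helpers below are hypothetical):
 *
 *	page = try_fast_path_alloc(gfp_mask, order);
 *	if (!page) {
 *		cpuset_memory_pressure_bump();
 *		page = try_slow_path_alloc(gfp_mask, order);
 *	}
 *
 * The function call behind the macro is only taken when the
 * memory_pressure_enabled control has been switched on, so the default
 * cost is a single branch.
 */
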
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

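/*
 * Illustrative sketch, not part of this header: a page-cache allocation
 * path might honour the spread policy roughly like this (the helper
 * name is hypothetical):
 *
 *	static struct page *cache_alloc_page(gfp_t gfp)
 *	{
 *		if (cpuset_do_page_mem_spread())
 *			return alloc_pages_exact_node(
 *				cpuset_mem_spread_node(), gfp, 0);
 *		return alloc_pages(gfp, 0);
 *	}
 *
 * cpuset_mem_spread_node() rotates through the task's mems_allowed so
 * successive allocations are spread over the permitted nodes.
 */
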
extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and an operation can spuriously fail against the
 * new value, potentially causing process failure. A retry loop using
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

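/*
 * Illustrative sketch, not part of this header: the retry loop the
 * comments above describe, wrapped around a mems_allowed-sensitive
 * allocation ("do_alloc" stands in for the real allocation call):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = do_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * The cookie is a seqcount snapshot: if mems_allowed changed while the
 * allocation ran, read_mems_allowed_retry() returns true and the
 * failure is retried against the new mask instead of being reported.
 */
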
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * Interrupts are disabled so that a seqcount reader running in
	 * interrupt context on this CPU cannot interrupt the write side
	 * and then spin forever waiting for an even sequence count.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */