#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}

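/*
 * Illustrative note (added commentary, not from the original header):
 * cpuset_inc()/cpuset_dec() are expected to be paired around the
 * lifetime of each non-root cpuset, so the static branch stays disabled,
 * making cpusets_enabled() effectively free, until at least one child
 * cpuset exists:
 *
 *	cpuset_inc();		// a child cpuset was created
 *	...
 *	cpuset_dec();		// ... and later destroyed
 *
 * nr_cpusets() then falls out of the jump label reference count, plus
 * one for the ever-present top-level cpuset.
 */
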
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

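/*
 * Usage sketch (simplified, not verbatim from the page allocator): the
 * allocator consults these helpers while walking a zonelist, so that
 * zones on nodes outside the current task's cpuset are skipped:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *		if (cpusets_enabled() &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;	// node not allowed, try next zone
 *		// ... try to allocate from this zone ...
 *	}
 */
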
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

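/*
 * The enabled check is open-coded in the macro so that, with the
 * feature off (the default), callers pay only a test of a global flag
 * and never make the function call.
 */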
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

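/*
 * Memory spreading: when enabled for a task's cpuset, page cache and
 * slab allocations are spread evenly across the task's allowed nodes
 * instead of preferring the node the task is running on. The helpers
 * below return the next node to spread onto, and the
 * cpuset_do_*_mem_spread() tests report whether the current task has
 * spreading enabled.
 */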
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail spuriously, potentially causing process failure. A retry loop
 * with read_mems_allowed_begin and read_mems_allowed_retry prevents
 * these artificial failures (see the sketch below read_mems_allowed_retry).
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

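/*
 * Typical retry pattern (illustrative sketch; alloc_something() is a
 * stand-in for any operation that reads mems_allowed and can fail
 * spuriously when it changes underneath):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = alloc_something(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */

/*
 * set_mems_allowed() below is the write side of the seqcount used
 * above. Interrupts are disabled across the write section, as is usual
 * for a seqcount whose readers may run in interrupt context: a reader
 * interrupting the writer on the same CPU would otherwise spin forever
 * waiting for a write section that cannot complete.
 */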
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */