/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() would still see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (we want to
 * stop looking at the real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
        return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
        static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
        static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_branch_dec_cpuslocked(&cpusets_enabled_key);
        static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general. E.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask()). The page allocator
 * needs to make additional checks for such configurations, and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
        return static_branch_unlikely(&cpusets_insane_config_key);
}
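
/*
 * Illustrative usage sketch (not a definitive kernel code path): callers such
 * as the page allocator can hide their extra handling of unsupportable
 * configurations behind cpusets_insane_config() so that sane configurations
 * only pay for a static branch. The __GFP_HARDWALL test and the validation
 * step below are assumptions made purely for illustration:
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
 *		... perform the extra validation of the effective nodemask
 *		... here, e.g. verify that at least one usable zone remains
 *		... for this request before retrying or failing
 *	}
 */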

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_zone_allowed(z, gfp_mask);
        return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and, depending on the new value, an operation can fail, potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!static_branch_unlikely(&cpusets_pre_enable_key))
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!static_branch_unlikely(&cpusets_enabled_key))
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
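
/*
 * Typical retry-loop sketch (illustrative only; the allocation helper named
 * below is hypothetical): wrap the nodemask-dependent work between
 * read_mems_allowed_begin() and read_mems_allowed_retry() so a concurrent
 * mems_allowed update cannot cause a spurious failure:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_from_allowed_nodes(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */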

static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
        return false;
}

static inline bool cpuset_cpu_is_isolated(int cpu)
{
        return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                              struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
        return false;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */