1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds #ifndef _LINUX_CPUSET_H
31da177e4SLinus Torvalds #define _LINUX_CPUSET_H
41da177e4SLinus Torvalds /*
51da177e4SLinus Torvalds * cpuset interface
61da177e4SLinus Torvalds *
71da177e4SLinus Torvalds * Copyright (C) 2003 BULL SA
8825a46afSPaul Jackson * Copyright (C) 2004-2006 Silicon Graphics, Inc.
91da177e4SLinus Torvalds *
101da177e4SLinus Torvalds */
111da177e4SLinus Torvalds
121da177e4SLinus Torvalds #include <linux/sched.h>
13105ab3d8SIngo Molnar #include <linux/sched/topology.h>
14f719ff9bSIngo Molnar #include <linux/sched/task.h>
151da177e4SLinus Torvalds #include <linux/cpumask.h>
161da177e4SLinus Torvalds #include <linux/nodemask.h>
17a1bc5a4eSDavid Rientjes #include <linux/mm.h>
18d4b96fb9SWill Deacon #include <linux/mmu_context.h>
19664eeddeSMel Gorman #include <linux/jump_label.h>
201da177e4SLinus Torvalds
211da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
221da177e4SLinus Torvalds
2389affbf5SDima Zavin /*
2489affbf5SDima Zavin * Static branch rewrites can happen in an arbitrary order for a given
2589affbf5SDima Zavin * key. In code paths where we need to loop with read_mems_allowed_begin() and
2689affbf5SDima Zavin * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
2789affbf5SDima Zavin * to ensure that begin() always gets rewritten before retry() in the
2889affbf5SDima Zavin * disabled -> enabled transition. If not, then if local irqs are disabled
2989affbf5SDima Zavin * around the loop, we can deadlock since retry() would always be
3089affbf5SDima Zavin * comparing the latest value of the mems_allowed seqcount against 0 as
3189affbf5SDima Zavin * begin() still would see cpusets_enabled() as false. The enabled -> disabled
3289affbf5SDima Zavin * transition should happen in reverse order for the same reasons (want to stop
3389affbf5SDima Zavin * looking at real value of mems_allowed.sequence in retry() first).
3489affbf5SDima Zavin */
3589affbf5SDima Zavin extern struct static_key_false cpusets_pre_enable_key;
36002f2906SVlastimil Babka extern struct static_key_false cpusets_enabled_key;
378ca1b5a4SFeng Tang extern struct static_key_false cpusets_insane_config_key;
388ca1b5a4SFeng Tang
cpusets_enabled(void)39664eeddeSMel Gorman static inline bool cpusets_enabled(void)
40664eeddeSMel Gorman {
41002f2906SVlastimil Babka return static_branch_unlikely(&cpusets_enabled_key);
42664eeddeSMel Gorman }
43664eeddeSMel Gorman
/*
 * Account one more active cpuset. The pre_enable key must be incremented
 * before the enabled key so that read_mems_allowed_begin() goes live
 * before read_mems_allowed_retry() during the disabled -> enabled
 * transition (see the comment above cpusets_pre_enable_key). The
 * _cpuslocked variants indicate callers hold cpus_read_lock().
 */
static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}
49664eeddeSMel Gorman
/*
 * Drop one active cpuset. Keys are decremented in the reverse order of
 * cpuset_inc() so that read_mems_allowed_retry() stops looking at the
 * real mems_allowed seqcount before read_mems_allowed_begin() stops
 * sampling it (see the comment above cpusets_pre_enable_key).
 */
static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
55202f72d5SPaul Jackson
568ca1b5a4SFeng Tang /*
578ca1b5a4SFeng Tang * This will get enabled whenever a cpuset configuration is considered
588ca1b5a4SFeng Tang * unsupportable in general. E.g. movable only node which cannot satisfy
598ca1b5a4SFeng Tang * any non movable allocations (see update_nodemask). Page allocator
608ca1b5a4SFeng Tang * needs to make additional checks for those configurations and this
618ca1b5a4SFeng Tang * check is meant to guard those checks without any overhead for sane
628ca1b5a4SFeng Tang * configurations.
638ca1b5a4SFeng Tang */
cpusets_insane_config(void)648ca1b5a4SFeng Tang static inline bool cpusets_insane_config(void)
658ca1b5a4SFeng Tang {
668ca1b5a4SFeng Tang return static_branch_unlikely(&cpusets_insane_config_key);
678ca1b5a4SFeng Tang }
688ca1b5a4SFeng Tang
691da177e4SLinus Torvalds extern int cpuset_init(void);
701da177e4SLinus Torvalds extern void cpuset_init_smp(void);
7150e76632SPeter Zijlstra extern void cpuset_force_rebuild(void);
7230e03acdSRakib Mullick extern void cpuset_update_active_cpus(void);
736c24849fSJuri Lelli extern void inc_dl_tasks_cs(struct task_struct *task);
746c24849fSJuri Lelli extern void dec_dl_tasks_cs(struct task_struct *task);
75111cd11bSJuri Lelli extern void cpuset_lock(void);
76111cd11bSJuri Lelli extern void cpuset_unlock(void);
776af866afSLi Zefan extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
7897c0054dSWill Deacon extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
793232e7aaSWaiman Long extern bool cpuset_cpu_is_isolated(int cpu);
80909d75a3SPaul Jackson extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
819276b1bcSPaul Jackson #define cpuset_current_mems_allowed (current->mems_allowed)
821da177e4SLinus Torvalds void cpuset_init_current_mems_allowed(void);
8319770b32SMel Gorman int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
84202f72d5SPaul Jackson
858e464522SHaifeng Xu extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
8602a0e53dSPaul Jackson
__cpuset_zone_allowed(struct zone * z,gfp_t gfp_mask)87002f2906SVlastimil Babka static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
8802a0e53dSPaul Jackson {
898e464522SHaifeng Xu return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
90002f2906SVlastimil Babka }
91002f2906SVlastimil Babka
cpuset_zone_allowed(struct zone * z,gfp_t gfp_mask)92002f2906SVlastimil Babka static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
93002f2906SVlastimil Babka {
94002f2906SVlastimil Babka if (cpusets_enabled())
95002f2906SVlastimil Babka return __cpuset_zone_allowed(z, gfp_mask);
96002f2906SVlastimil Babka return true;
97202f72d5SPaul Jackson }
98202f72d5SPaul Jackson
99bbe373f2SDavid Rientjes extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
100bbe373f2SDavid Rientjes const struct task_struct *tsk2);
1013e0d98b9SPaul Jackson
#ifdef CONFIG_CPUSETS_V1
/*
 * cpuset v1 memory_pressure accounting: bump the counter only when the
 * feature has been switched on via cpuset_memory_pressure_enabled,
 * keeping the common (disabled) case to a single flag test.
 */
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
#else
/* cpuset v1 memory_pressure not configured: no-op. */
static inline void cpuset_memory_pressure_bump(void) { }
#endif
1133e0d98b9SPaul Jackson
114df5f8314SEric W. Biederman extern void cpuset_task_status_allowed(struct seq_file *m,
115df5f8314SEric W. Biederman struct task_struct *task);
11652de4779SZefan Li extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
11752de4779SZefan Li struct pid *pid, struct task_struct *tsk);
1181da177e4SLinus Torvalds
119825a46afSPaul Jackson extern int cpuset_mem_spread_node(void);
120825a46afSPaul Jackson
cpuset_do_page_mem_spread(void)121825a46afSPaul Jackson static inline int cpuset_do_page_mem_spread(void)
122825a46afSPaul Jackson {
1232ad654bcSZefan Li return task_spread_page(current);
124825a46afSPaul Jackson }
125825a46afSPaul Jackson
12677ef80c6SYaowei Bai extern bool current_cpuset_is_being_rebound(void);
1278793d854SPaul Menage
128*34929a07SJuri Lelli extern void dl_rebuild_rd_accounting(void);
129e761b772SMax Krasnyansky extern void rebuild_sched_domains(void);
130e761b772SMax Krasnyansky
131da39da3aSDavid Rientjes extern void cpuset_print_current_mems_allowed(void);
1322ff899e3SJuri Lelli extern void cpuset_reset_sched_domains(void);
13375aa1994SDavid Rientjes
134c0ff7453SMiao Xie /*
135d26914d1SMel Gorman * read_mems_allowed_begin is required when making decisions involving
136d26914d1SMel Gorman * mems_allowed such as during page allocation. mems_allowed can be updated in
137d26914d1SMel Gorman * parallel and depending on the new value an operation can fail potentially
138d26914d1SMel Gorman * causing process failure. A retry loop with read_mems_allowed_begin and
139d26914d1SMel Gorman * read_mems_allowed_retry prevents these artificial failures.
140c0ff7453SMiao Xie */
static inline unsigned int read_mems_allowed_begin(void)
{
	/*
	 * Test the pre_enable key here: it is enabled before and disabled
	 * after cpusets_enabled_key, so begin() starts returning real
	 * seqcount samples before the paired retry() starts checking them
	 * (see the comment above cpusets_pre_enable_key).
	 */
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}
148c0ff7453SMiao Xie
149c0ff7453SMiao Xie /*
150d26914d1SMel Gorman * If this returns true, the operation that took place after
151d26914d1SMel Gorman * read_mems_allowed_begin may have failed artificially due to a concurrent
152d26914d1SMel Gorman * update of mems_allowed. It is up to the caller to retry the operation if
153cc9a6c87SMel Gorman * appropriate.
154c0ff7453SMiao Xie */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	/*
	 * Test the later-enabled / earlier-disabled key here so that
	 * retry() never compares a live seqcount against the 0 returned
	 * by a still-disabled begin() (see the comment above
	 * cpusets_pre_enable_key).
	 */
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
162c0ff7453SMiao Xie
/*
 * Publish a new mems_allowed for current. The seqcount write section
 * lets read_mems_allowed_begin()/retry() readers detect the concurrent
 * update; irqs are disabled across it so a reader running in interrupt
 * context on this CPU cannot spin on an odd (in-progress) sequence.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
17558568d2aSMiao Xie
1761da177e4SLinus Torvalds #else /* !CONFIG_CPUSETS */
1771da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: cpusets can never be enabled. */
static inline bool cpusets_enabled(void)
{
	return false;
}
179664eeddeSMel Gorman
/* !CONFIG_CPUSETS: no configuration can be flagged as insane. */
static inline bool cpusets_insane_config(void)
{
	return false;
}
1818ca1b5a4SFeng Tang
/* !CONFIG_CPUSETS: nothing to set up; always succeeds. */
static inline int cpuset_init(void)
{
	return 0;
}
/* !CONFIG_CPUSETS: no SMP-time cpuset setup needed. */
static inline void cpuset_init_smp(void)
{
}
1841da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: there are no sched domains to force-rebuild. */
static inline void cpuset_force_rebuild(void)
{
}
18650e76632SPeter Zijlstra
/*
 * !CONFIG_CPUSETS: a CPU hotplug event simply collapses the scheduler
 * back to a single default sched domain spanning the active CPUs.
 */
static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}
1913a101d05STejun Heo
/* !CONFIG_CPUSETS: no per-cpuset deadline-task accounting. */
static inline void inc_dl_tasks_cs(struct task_struct *task)
{
}
/* !CONFIG_CPUSETS: no per-cpuset deadline-task accounting. */
static inline void dec_dl_tasks_cs(struct task_struct *task)
{
}
/* !CONFIG_CPUSETS: no cpuset mutex exists to take. */
static inline void cpuset_lock(void)
{
}
/* !CONFIG_CPUSETS: no cpuset mutex exists to release. */
static inline void cpuset_unlock(void)
{
}
196710da3c8SJuri Lelli
/*
 * !CONFIG_CPUSETS: nothing restricts the task; report every CPU the
 * task could possibly run on.
 */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}
2021da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: no cpuset-level fallback affinity was applied. */
static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}
2079084bb82SOleg Nesterov
/* !CONFIG_CPUSETS: no CPU is ever cpuset-isolated. */
static inline bool cpuset_cpu_is_isolated(int cpu) { return false; }
2123232e7aaSWaiman Long
/* !CONFIG_CPUSETS: every possible memory node is allowed. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
217909d75a3SPaul Jackson
21838d7bee9SLai Jiangshan #define cpuset_current_mems_allowed (node_states[N_MEMORY])
/* !CONFIG_CPUSETS: no per-task mems_allowed state to initialize. */
static inline void cpuset_init_current_mems_allowed(void)
{
}
2201da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: any nodemask intersects the unrestricted mems. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
2251da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: allocations are never cpuset-restricted. */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}
230002f2906SVlastimil Babka
/* !CONFIG_CPUSETS: allocations are never cpuset-restricted. */
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}
2351da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: both tasks share the same unrestricted nodemask. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
241ef08e3b4SPaul Jackson
/* !CONFIG_CPUSETS: no memory pressure counter to bump. */
static inline void cpuset_memory_pressure_bump(void)
{
}
2433e0d98b9SPaul Jackson
/* !CONFIG_CPUSETS: nothing cpuset-related to add to the task status output. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}
2481da177e4SLinus Torvalds
/* !CONFIG_CPUSETS: no memory spreading; always report node 0. */
static inline int cpuset_mem_spread_node(void) { return 0; }
253825a46afSPaul Jackson
/* !CONFIG_CPUSETS: page cache spreading is never in effect. */
static inline int cpuset_do_page_mem_spread(void) { return 0; }
258825a46afSPaul Jackson
/* !CONFIG_CPUSETS: there is no cpuset that could be rebound. */
static inline bool current_cpuset_is_being_rebound(void) { return false; }
2638793d854SPaul Menage
/* !CONFIG_CPUSETS: no deadline root-domain accounting to rebuild. */
static inline void dl_rebuild_rd_accounting(void) { }
267*34929a07SJuri Lelli
/* !CONFIG_CPUSETS: rebuilding means a single default sched domain. */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
272e761b772SMax Krasnyansky
/* !CONFIG_CPUSETS: resetting means a single default sched domain. */
static inline void cpuset_reset_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
2772ff899e3SJuri Lelli
/* !CONFIG_CPUSETS: nothing to print for current's mems_allowed. */
static inline void cpuset_print_current_mems_allowed(void) { }
28175aa1994SDavid Rientjes
/* !CONFIG_CPUSETS: mems_allowed is not tracked; nothing to publish. */
static inline void set_mems_allowed(nodemask_t nodemask) { }
28558568d2aSMiao Xie
/* !CONFIG_CPUSETS: no seqcount to sample; a constant cookie suffices. */
static inline unsigned int read_mems_allowed_begin(void) { return 0; }
290c0ff7453SMiao Xie
/* !CONFIG_CPUSETS: mems_allowed never changes, so never retry. */
static inline bool read_mems_allowed_retry(unsigned int seq) { return false; }
295c0ff7453SMiao Xie
2961da177e4SLinus Torvalds #endif /* !CONFIG_CPUSETS */
2971da177e4SLinus Torvalds
2981da177e4SLinus Torvalds #endif /* _LINUX_CPUSET_H */
299