#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
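
/*
 * cpuset_zone_allowed_softwall()/_hardwall() below are fast-path wrappers
 * around the out-of-line checks declared above.  With at most one cpuset
 * in the system (number_of_cpusets <= 1) every node is allowed, so both
 * wrappers return 1 without calling into kernel/cpuset.c.  The precise
 * softwall vs. hardwall semantics (roughly, whether a !__GFP_HARDWALL
 * allocation may fall back to the memory of a hardwalled ancestor cpuset)
 * are documented with the __cpuset_zone_allowed_*() implementations in
 * kernel/cpuset.c.
 */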
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_softwall(z, gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_hardwall(z, gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

/*
 * Bump the per-cpuset memory pressure counter only when that accounting
 * has been enabled; otherwise this is a single flag test.
 */
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern void cpuset_lock(void);
extern void cpuset_unlock(void);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

#else /* !CONFIG_CPUSETS */

/*
 * Stubs used when cpusets are configured out: callers need no #ifdefs,
 * and placement falls back to all possible CPUs and memory nodes.
 */
static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
{
	*mask = cpu_possible_map;
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      cpumask_t *mask)
{
	*mask = cpu_possible_map;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */