#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int' argument.
 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
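/*
 * Usage sketch (editorial illustration only, not part of this header's
 * ABI): user space passes exactly one MPOL_* mode, optionally OR'ed with
 * MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES, in the single 'int'
 * argument of set_mempolicy(2) or mbind(2).  The snippet assumes an
 * environment (e.g. libnuma's <numaif.h> syscall wrappers) that exposes
 * set_mempolicy() and these MPOL_* values; the two-node mask is just an
 * example:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	err = set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *			    &nodes, 8 * sizeof(nodes) + 1);
 *
 * With MPOL_F_STATIC_NODES the user nodemask is not remapped when the
 * task's allowed nodes change; with MPOL_F_RELATIVE_NODES it is
 * interpreted relative to the task's currently allowed nodes.
 */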
#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
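/*
 * Reference-counting sketch (editorial illustration; lookup_policy() is a
 * hypothetical helper, not a function declared here, and the ERR_PTR
 * convention for a failed duplicate is an assumption about the out-of-line
 * implementation).  A caller handed a policy that may carry MPOL_F_SHARED
 * drops only the conditional reference; a caller that duplicates a policy
 * owns the copy and its initial reference outright:
 *
 *	struct mempolicy *pol = lookup_policy(vma, addr);
 *
 *	... allocate pages according to pol ...
 *
 *	mpol_cond_put(pol);		drops a ref only for shared policies
 *
 *	struct mempolicy *copy = mpol_dup(pol);
 *	if (!IS_ERR(copy))
 *		mpol_put(copy);		refcount started at 1, so this frees it
 */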
/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
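/*
 * Shared-policy usage sketch (editorial illustration; the inode-info
 * structure and call sites are hypothetical, loosely modelled on how a
 * tmpfs-like filesystem would drive the API declared above):
 *
 *	struct example_inode_info {
 *		struct shared_policy policy;
 *	};
 *
 *	object creation:	mpol_shared_policy_init(&info->policy, mpol);
 *	mbind() on mapping:	mpol_set_shared_policy(&info->policy, vma, new);
 *	page allocation:	pol = mpol_shared_policy_lookup(&info->policy,
 *							pgoff);
 *				... allocate, then drop the returned reference
 *				(mpol_cond_put() for shared entries) ...
 *	object teardown:	mpol_free_shared_policy(&info->policy);
 *
 * The index passed to mpol_shared_policy_lookup() is a page offset into
 * the object, matching the pages-not-bytes indexing described above.
 */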
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);
#endif

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return 0;
	return 1;
}

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif
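/*
 * Note on the !CONFIG_NUMA stubs above (editorial): they are written so
 * that common callers need no #ifdef CONFIG_NUMA.  For example,
 *
 *	pol = mpol_dup(vma_policy(vma));
 *	...
 *	mpol_put(pol);
 *
 * compiles in both configurations; without NUMA, vma_policy() is NULL,
 * mpol_dup() returns NULL, and mpol_put() is a no-op.
 */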