#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in a single
 * 'int' argument.  The MPOL_MODE_FLAGS macro below defines the legal set
 * of optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};
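
/*
 * Example (hypothetical userspace usage, not part of this header):
 * interleave the calling task's future allocations across nodes 0-1.
 * set_mempolicy(2) is declared in libnuma's <numaif.h>; the mode and
 * any optional MPOL_F_* flag travel together in the first 'int'.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */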

enum mpol_rebind_step {
	MPOL_REBIND_ONCE,	/* do the rebind work in a single pass (not two steps) */
	MPOL_REBIND_STEP1,	/* first step: set all the new nodes */
	MPOL_REBIND_STEP2,	/* second step: clear the now-disallowed nodes */
	MPOL_REBIND_NSTEP,
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
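
/*
 * A minimal sketch of the calling convention: userspace ORs one optional
 * flag into the mode (e.g. MPOL_INTERLEAVE | MPOL_F_STATIC_NODES), and
 * the syscall entry path splits the 'int' back apart again, roughly
 * (compare the set_mempolicy() implementation in mm/mempolicy.c):
 *
 *	unsigned short flags = mode & MPOL_MODE_FLAGS;
 *	mode &= ~MPOL_MODE_FLAGS;
 *	if (mode >= MPOL_MAX)
 *		return -EINVAL;
 */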

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

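/*
 * Example (hypothetical userspace usage): query the policy that governs
 * a particular address rather than the calling task as a whole.  The
 * prototype comes from libnuma's <numaif.h>.
 *
 *	int mode;
 *
 *	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR) == 0)
 *		printf("policy for %p: %d\n", addr, mode);
 */
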
/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
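
/*
 * Example (hypothetical userspace usage): bind an existing mapping to
 * node 0 and migrate any of its pages already resident elsewhere.
 *
 *	unsigned long nodemask = 1UL << 0;
 *
 *	if (mbind(addr, length, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */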

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state.  All vma manipulation is somewhat protected by a down_read
 * on mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
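
/*
 * A minimal sketch of how a reader is expected to pick apart the 'v'
 * union, keyed on 'mode' (hypothetical helper, not part of this API):
 *
 *	static int example_policy_node(struct mempolicy *pol)
 *	{
 *		switch (pol->mode) {
 *		case MPOL_PREFERRED:
 *			return pol->v.preferred_node;
 *		case MPOL_BIND:
 *		case MPOL_INTERLEAVE:
 *			return first_node(pol->v.nodes);
 *		default:
 *			return -1;	/* MPOL_DEFAULT: 'v' is undefined */
 *		}
 *	}
 */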

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
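
/*
 * Typical caller pattern (a sketch): a shared-policy lookup takes an
 * extra reference on MPOL_F_SHARED policies, which the caller drops
 * again once the allocation is done.  mpol_cond_put(NULL) is a no-op.
 *
 *	pol = mpol_shared_policy_lookup(sp, idx);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);
 */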

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
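
/*
 * A sketch of the classic duplication site, fork: the child gets its
 * own reference-counted copy of the parent's task policy.  __mpol_dup()
 * returns an ERR_PTR on allocation failure, so callers check with
 * IS_ERR() (compare copy_process() in kernel/fork.c; the label below is
 * hypothetical):
 *
 *	child->mempolicy = mpol_dup(parent->mempolicy);
 *	if (IS_ERR(child->mempolicy)) {
 *		retval = PTR_ERR(child->mempolicy);
 *		child->mempolicy = NULL;
 *		goto cleanup;
 *	}
 */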

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
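
/*
 * mpol_get()/mpol_put() form the usual take/drop pair; a sketch:
 *
 *	mpol_get(pol);		pin pol across a lock drop or a sleep
 *	...
 *	mpol_put(pol);		frees pol if this was the last reference
 */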

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas.  The vmas
 * carry the policy.  As a special twist, the pseudo mm is indexed in
 * pages, not bytes, so that we can handle shared memory segments larger
 * than an unsigned long's worth of bytes.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
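
/*
 * A minimal sketch of a page-indexed lookup as a shared memory driver
 * would issue it from a fault path (compare shmem's get_policy
 * implementation; 'vma' and 'addr' describe the faulting mapping):
 *
 *	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 *			vma->vm_pgoff;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 */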

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
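
/*
 * do_migrate_pages() backs the migrate_pages(2) syscall.  Example
 * (hypothetical userspace usage, prototype from libnuma's <numaif.h>):
 * move a task's pages from node 0 to node 1.
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *
 *	if (migrate_pages(pid, sizeof(from) * 8, &from, &to) < 0)
 *		perror("migrate_pages");
 */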
#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);
#endif
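
/*
 * These two helpers back tmpfs's 'mpol=' mount option.  Examples of the
 * string forms mpol_parse_str() accepts (see
 * Documentation/filesystems/tmpfs.txt):
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind:0,2 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind=static:0-3 tmpfs /mnt
 */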

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return 0;
	return 1;
}
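
/*
 * A minimal usage sketch: a migration walk that skips VMAs whose pages
 * cannot be moved (hypothetical loop over an mm's mappings, mmap_sem
 * held for read):
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		if (!vma_migratable(vma))
 *			continue;
 *		... queue this vma's pages for migration ...
 *	}
 */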

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif /* _LINUX_MEMPOLICY_H */