xref: /linux-6.15/include/linux/mm_types.h (revision 27d9a0fd)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_TYPES_H
3 #define _LINUX_MM_TYPES_H
4 
5 #include <linux/mm_types_task.h>
6 
7 #include <linux/auxvec.h>
8 #include <linux/kref.h>
9 #include <linux/list.h>
10 #include <linux/spinlock.h>
11 #include <linux/rbtree.h>
12 #include <linux/maple_tree.h>
13 #include <linux/rwsem.h>
14 #include <linux/completion.h>
15 #include <linux/cpumask.h>
16 #include <linux/uprobes.h>
17 #include <linux/rcupdate.h>
18 #include <linux/page-flags-layout.h>
19 #include <linux/workqueue.h>
20 #include <linux/seqlock.h>
21 #include <linux/percpu_counter.h>
22 
23 #include <asm/mmu.h>
24 
25 #ifndef AT_VECTOR_SIZE_ARCH
26 #define AT_VECTOR_SIZE_ARCH 0
27 #endif
28 #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
29 
30 #define INIT_PASID	0
31 
32 struct address_space;
33 struct mem_cgroup;
34 
35 /*
36  * Each physical page in the system has a struct page associated with
37  * it to keep track of whatever it is we are using the page for at the
38  * moment. Note that we have no way to track which tasks are using
39  * a page, though if it is a pagecache page, rmap structures can tell us
40  * who is mapping it.
41  *
42  * If you allocate the page using alloc_pages(), you can use some of the
43  * space in struct page for your own purposes.  The five words in the main
44  * union are available, except for bit 0 of the first word which must be
45  * kept clear.  Many users use this word to store a pointer to an object
46  * which is guaranteed to be aligned.  If you use the same storage as
47  * page->mapping, you must restore it to NULL before freeing the page.
48  *
49  * If your page will not be mapped to userspace, you can also use the four
50  * bytes in the mapcount union, but you must call page_mapcount_reset()
51  * before freeing it.
52  *
53  * If you want to use the refcount field, it must be used in such a way
54  * that other CPUs temporarily incrementing and then decrementing the
55  * refcount does not cause problems.  On receiving the page from
56  * alloc_pages(), the refcount will be positive.
57  *
58  * If you allocate pages of order > 0, you can use some of the fields
59  * in each subpage, but you may need to restore some of their values
60  * afterwards.
61  *
62  * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
63  * That requires that freelist & counters in struct slab be adjacent and
64  * double-word aligned. Because struct slab currently just reinterprets the
65  * bits of struct page, we align all struct pages to double-word boundaries,
66  * and ensure that 'freelist' is aligned within struct slab.
67  */
68 #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
69 #define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
70 #else
71 #define _struct_page_alignment	__aligned(sizeof(unsigned long))
72 #endif
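/*
 * Example of the allocator-use rules described above (illustrative sketch;
 * alloc_pages(), __free_pages() and GFP_KERNEL come from gfp.h, and
 * "my_cookie" is a made-up object):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	set_page_private(page, (unsigned long)my_cookie);  // one of the five words
 *	...
 *	set_page_private(page, 0);
 *	// If the page->mapping storage was reused, restore it to NULL here.
 *	__free_pages(page, 0);
 */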
73 
74 struct page {
75 	unsigned long flags;		/* Atomic flags, some possibly
76 					 * updated asynchronously */
77 	/*
78 	 * Five words (20/40 bytes) are available in this union.
79 	 * WARNING: bit 0 of the first word is used for PageTail(). That
80 	 * means the other users of this union MUST NOT use the bit to
81 	 * avoid collision and false-positive PageTail().
82 	 */
83 	union {
84 		struct {	/* Page cache and anonymous pages */
85 			/**
86 			 * @lru: Pageout list, e.g. active_list, protected by
87 			 * lruvec->lru_lock.  Sometimes used as a generic list
88 			 * by the page owner.
89 			 */
90 			union {
91 				struct list_head lru;
92 
93 				/* Or, for the Unevictable "LRU list" slot */
94 				struct {
95 					/* Always even, to negate PageTail */
96 					void *__filler;
97 					/* Count page's or folio's mlocks */
98 					unsigned int mlock_count;
99 				};
100 
101 				/* Or, free page */
102 				struct list_head buddy_list;
103 				struct list_head pcp_list;
104 			};
105 			/* See page-flags.h for PAGE_MAPPING_FLAGS */
106 			struct address_space *mapping;
107 			union {
108 				pgoff_t index;		/* Our offset within mapping. */
109 				unsigned long share;	/* share count for fsdax */
110 			};
111 			/**
112 			 * @private: Mapping-private opaque data.
113 			 * Usually used for buffer_heads if PagePrivate.
114 			 * Used for swp_entry_t if PageSwapCache.
115 			 * Indicates order in the buddy system if PageBuddy.
116 			 */
117 			unsigned long private;
118 		};
119 		struct {	/* page_pool used by netstack */
120 			/**
121 			 * @pp_magic: magic value to avoid recycling pages that
122 			 * were not allocated by page_pool.
123 			 */
124 			unsigned long pp_magic;
125 			struct page_pool *pp;
126 			unsigned long _pp_mapping_pad;
127 			unsigned long dma_addr;
128 			union {
129 				/**
130 				 * dma_addr_upper: might require a 64-bit
131 				 * value on 32-bit architectures.
132 				 */
133 				unsigned long dma_addr_upper;
134 				/**
135 				 * For frag page support; not supported on
136 				 * 32-bit architectures with 64-bit DMA.
137 				 */
138 				atomic_long_t pp_frag_count;
139 			};
140 		};
141 		struct {	/* Tail pages of compound page */
142 			unsigned long compound_head;	/* Bit zero is set */
143 		};
144 		struct {	/* Page table pages */
145 			unsigned long _pt_pad_1;	/* compound_head */
146 			pgtable_t pmd_huge_pte; /* protected by page->ptl */
147 			unsigned long _pt_pad_2;	/* mapping */
148 			union {
149 				struct mm_struct *pt_mm; /* x86 pgds only */
150 				atomic_t pt_frag_refcount; /* powerpc */
151 			};
152 #if ALLOC_SPLIT_PTLOCKS
153 			spinlock_t *ptl;
154 #else
155 			spinlock_t ptl;
156 #endif
157 		};
158 		struct {	/* ZONE_DEVICE pages */
159 			/** @pgmap: Points to the hosting device page map. */
160 			struct dev_pagemap *pgmap;
161 			void *zone_device_data;
162 			/*
163 			 * ZONE_DEVICE private pages are counted as being
164 			 * mapped so the next 3 words hold the mapping, index,
165 			 * and private fields from the source anonymous or
166 			 * page cache page while the page is migrated to device
167 			 * private memory.
168 			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
169 			 * use the mapping, index, and private fields when
170 			 * pmem backed DAX files are mapped.
171 			 */
172 		};
173 
174 		/** @rcu_head: You can use this to free a page by RCU. */
175 		struct rcu_head rcu_head;
176 	};
177 
178 	union {		/* This union is 4 bytes in size. */
179 		/*
180 		 * If the page can be mapped to userspace, encodes the number
181 		 * of times this page is referenced by a page table.
182 		 */
183 		atomic_t _mapcount;
184 
185 		/*
186 		 * If the page is neither PageSlab nor mappable to userspace,
187 		 * the value stored here may help determine what this page
188 		 * is used for.  See page-flags.h for a list of page types
189 		 * which are currently stored here.
190 		 */
191 		unsigned int page_type;
192 	};
193 
194 	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
195 	atomic_t _refcount;
196 
197 #ifdef CONFIG_MEMCG
198 	unsigned long memcg_data;
199 #endif
200 
201 	/*
202 	 * On machines where all RAM is mapped into kernel address space,
203 	 * we can simply calculate the virtual address. On machines with
204 	 * highmem some memory is mapped into kernel virtual memory
205 	 * dynamically, so we need a place to store that address.
206 	 * Note that this field could be 16 bits on x86 ... ;)
207 	 *
208 	 * Architectures with slow multiplication can define
209 	 * WANT_PAGE_VIRTUAL in asm/page.h
210 	 */
211 #if defined(WANT_PAGE_VIRTUAL)
212 	void *virtual;			/* Kernel virtual address (NULL if
213 					   not kmapped, i.e. highmem) */
214 #endif /* WANT_PAGE_VIRTUAL */
215 
216 #ifdef CONFIG_KMSAN
217 	/*
218 	 * KMSAN metadata for this page:
219 	 *  - shadow page: every bit indicates whether the corresponding
220 	 *    bit of the original page is initialized (0) or not (1);
221 	 *  - origin page: every 4 bytes contain an id of the stack trace
222 	 *    where the uninitialized value was created.
223 	 */
224 	struct page *kmsan_shadow;
225 	struct page *kmsan_origin;
226 #endif
227 
228 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
229 	int _last_cpupid;
230 #endif
231 } _struct_page_alignment;
232 
233 /*
234  * struct encoded_page - a nonexistent type marking this pointer
235  *
236  * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
237  * with the low bits of the pointer indicating extra context-dependent
238  * information. Not super-common, but happens in mmu_gather and mlock
239  * handling, and this acts as a type system check on that use.
240  *
241  * We only really have two guaranteed bits in general, although you could
242  * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
243  * for more.
244  *
245  * Use the supplied helper functions to encode/decode the pointer and bits.
246  */
247 struct encoded_page;
248 #define ENCODE_PAGE_BITS 3ul
249 static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
250 {
251 	BUILD_BUG_ON(flags > ENCODE_PAGE_BITS);
252 	return (struct encoded_page *)(flags | (unsigned long)page);
253 }
254 
255 static inline unsigned long encoded_page_flags(struct encoded_page *page)
256 {
257 	return ENCODE_PAGE_BITS & (unsigned long)page;
258 }
259 
260 static inline struct page *encoded_page_ptr(struct encoded_page *page)
261 {
262 	return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page);
263 }
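/*
 * Example use of the helpers above (illustrative sketch; the flag value and
 * variable names are made up):
 *
 *	struct encoded_page *ep = encode_page(page, 1);	// stash one low bit
 *	...
 *	if (encoded_page_flags(ep) & 1)
 *		handle_special_case();			// hypothetical
 *	page = encoded_page_ptr(ep);
 */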
264 
265 /**
266  * struct folio - Represents a contiguous set of bytes.
267  * @flags: Identical to the page flags.
268  * @lru: Least Recently Used list; tracks how recently this folio was used.
269  * @mlock_count: Number of times this folio has been pinned by mlock().
270  * @mapping: The file this page belongs to, or refers to the anon_vma for
271  *    anonymous memory.
272  * @index: Offset within the file, in units of pages.  For anonymous memory,
273  *    this is the index from the beginning of the mmap.
274  * @private: Filesystem per-folio data (see folio_attach_private()).
275  *    Used for swp_entry_t if folio_test_swapcache().
276  * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
277  *    find out how many times this folio is mapped by userspace.
278  * @_refcount: Do not access this member directly.  Use folio_ref_count()
279  *    to find how many references there are to this folio.
280  * @memcg_data: Memory Control Group data.
281  * @_folio_dtor: Which destructor to use for this folio.
282  * @_folio_order: Do not use directly, call folio_order().
283  * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
284  * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
285  * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
286  * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
287  * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
288  * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
289  * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
290  * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
291  * @_deferred_list: Folios to be split under memory pressure.
292  *
293  * A folio is a physically, virtually and logically contiguous set
294  * of bytes.  It is a power-of-two in size, and it is aligned to that
295  * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
296  * in the page cache, it is at a file offset which is a multiple of that
297  * power-of-two.  It may be mapped into userspace at an address which is
298  * at an arbitrary page offset, but its kernel virtual address is aligned
299  * to its size.
300  */
301 struct folio {
302 	/* private: don't document the anon union */
303 	union {
304 		struct {
305 	/* public: */
306 			unsigned long flags;
307 			union {
308 				struct list_head lru;
309 	/* private: avoid cluttering the output */
310 				struct {
311 					void *__filler;
312 	/* public: */
313 					unsigned int mlock_count;
314 	/* private: */
315 				};
316 	/* public: */
317 			};
318 			struct address_space *mapping;
319 			pgoff_t index;
320 			void *private;
321 			atomic_t _mapcount;
322 			atomic_t _refcount;
323 #ifdef CONFIG_MEMCG
324 			unsigned long memcg_data;
325 #endif
326 	/* private: the union with struct page is transitional */
327 		};
328 		struct page page;
329 	};
330 	union {
331 		struct {
332 			unsigned long _flags_1;
333 			unsigned long _head_1;
334 	/* public: */
335 			unsigned char _folio_dtor;
336 			unsigned char _folio_order;
337 			atomic_t _entire_mapcount;
338 			atomic_t _nr_pages_mapped;
339 			atomic_t _pincount;
340 #ifdef CONFIG_64BIT
341 			unsigned int _folio_nr_pages;
342 #endif
343 	/* private: the union with struct page is transitional */
344 		};
345 		struct page __page_1;
346 	};
347 	union {
348 		struct {
349 			unsigned long _flags_2;
350 			unsigned long _head_2;
351 	/* public: */
352 			void *_hugetlb_subpool;
353 			void *_hugetlb_cgroup;
354 			void *_hugetlb_cgroup_rsvd;
355 			void *_hugetlb_hwpoison;
356 	/* private: the union with struct page is transitional */
357 		};
358 		struct {
359 			unsigned long _flags_2a;
360 			unsigned long _head_2a;
361 	/* public: */
362 			struct list_head _deferred_list;
363 	/* private: the union with struct page is transitional */
364 		};
365 		struct page __page_2;
366 	};
367 };
368 
369 #define FOLIO_MATCH(pg, fl)						\
370 	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
371 FOLIO_MATCH(flags, flags);
372 FOLIO_MATCH(lru, lru);
373 FOLIO_MATCH(mapping, mapping);
374 FOLIO_MATCH(compound_head, lru);
375 FOLIO_MATCH(index, index);
376 FOLIO_MATCH(private, private);
377 FOLIO_MATCH(_mapcount, _mapcount);
378 FOLIO_MATCH(_refcount, _refcount);
379 #ifdef CONFIG_MEMCG
380 FOLIO_MATCH(memcg_data, memcg_data);
381 #endif
382 #undef FOLIO_MATCH
383 #define FOLIO_MATCH(pg, fl)						\
384 	static_assert(offsetof(struct folio, fl) ==			\
385 			offsetof(struct page, pg) + sizeof(struct page))
386 FOLIO_MATCH(flags, _flags_1);
387 FOLIO_MATCH(compound_head, _head_1);
388 #undef FOLIO_MATCH
389 #define FOLIO_MATCH(pg, fl)						\
390 	static_assert(offsetof(struct folio, fl) ==			\
391 			offsetof(struct page, pg) + 2 * sizeof(struct page))
392 FOLIO_MATCH(flags, _flags_2);
393 FOLIO_MATCH(compound_head, _head_2);
394 #undef FOLIO_MATCH
395 
396 /*
397  * Used for sizing the vmemmap region on some architectures
398  */
399 #define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))
400 
401 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
402 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
403 
404 /*
405  * page_private can be used on tail pages.  However, PagePrivate is only
406  * checked by the VM on the head page.  So page_private on the tail pages
407  * should be used for data that's ancillary to the head page (e.g. attaching
408  * buffer heads to tail pages after attaching buffer heads to the head page).
409  */
410 #define page_private(page)		((page)->private)
411 
412 static inline void set_page_private(struct page *page, unsigned long private)
413 {
414 	page->private = private;
415 }
416 
417 static inline void *folio_get_private(struct folio *folio)
418 {
419 	return folio->private;
420 }
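/*
 * Example (illustrative sketch; folio_attach_private() is assumed from
 * pagemap.h, and "bh"/"data" are made-up variables):
 *
 *	set_page_private(page, (unsigned long)bh);
 *	bh = (struct buffer_head *)page_private(page);
 *
 *	folio_attach_private(folio, data);
 *	data = folio_get_private(folio);
 */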
421 
422 struct page_frag_cache {
423 	void * va;
424 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
425 	__u16 offset;
426 	__u16 size;
427 #else
428 	__u32 offset;
429 #endif
430 	/* We maintain a pagecount bias so that we don't dirty the cache line
431 	 * containing page->_refcount every time we allocate a fragment.
432 	 */
433 	unsigned int		pagecnt_bias;
434 	bool pfmemalloc;
435 };
436 
437 typedef unsigned long vm_flags_t;
438 
439 /*
440  * A region containing a mapping of a non-memory backed file under NOMMU
441  * conditions.  These are held in a global tree and are pinned by the VMAs that
442  * map parts of them.
443  */
444 struct vm_region {
445 	struct rb_node	vm_rb;		/* link in global region tree */
446 	vm_flags_t	vm_flags;	/* VMA vm_flags */
447 	unsigned long	vm_start;	/* start address of region */
448 	unsigned long	vm_end;		/* region initialised to here */
449 	unsigned long	vm_top;		/* region allocated to here */
450 	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
451 	struct file	*vm_file;	/* the backing file or NULL */
452 
453 	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
454 	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
455 						* this region */
456 };
457 
458 #ifdef CONFIG_USERFAULTFD
459 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
460 struct vm_userfaultfd_ctx {
461 	struct userfaultfd_ctx *ctx;
462 };
463 #else /* CONFIG_USERFAULTFD */
464 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
465 struct vm_userfaultfd_ctx {};
466 #endif /* CONFIG_USERFAULTFD */
467 
468 struct anon_vma_name {
469 	struct kref kref;
470 	/* The name needs to be at the end because it is dynamically sized. */
471 	char name[];
472 };
473 
474 struct vma_lock {
475 	struct rw_semaphore lock;
476 };
477 
478 struct vma_numab_state {
479 	unsigned long next_scan;
480 	unsigned long next_pid_reset;
481 	unsigned long access_pids[2];
482 };
483 
484 /*
485  * This struct describes a virtual memory area. There is one of these
486  * per VM-area/task. A VM area is any part of the process virtual memory
487  * space that has a special rule for the page-fault handlers (ie a shared
488  * library, the executable area etc).
489  */
490 struct vm_area_struct {
491 	/* The first cache line has the info for VMA tree walking. */
492 
493 	union {
494 		struct {
495 			/* VMA covers [vm_start; vm_end) addresses within mm */
496 			unsigned long vm_start;
497 			unsigned long vm_end;
498 		};
499 #ifdef CONFIG_PER_VMA_LOCK
500 		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
501 #endif
502 	};
503 
504 	struct mm_struct *vm_mm;	/* The address space we belong to. */
505 	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
506 
507 	/*
508 	 * Flags, see mm.h.
509 	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
510 	 */
511 	union {
512 		const vm_flags_t vm_flags;
513 		vm_flags_t __private __vm_flags;
514 	};
515 
516 #ifdef CONFIG_PER_VMA_LOCK
517 	int vm_lock_seq;
518 	struct vma_lock *vm_lock;
519 
520 	/* Flag to indicate areas detached from the mm->mm_mt tree */
521 	bool detached;
522 #endif
523 
524 	/*
525 	 * For areas with an address space and backing store,
526 	 * linkage into the address_space->i_mmap interval tree.
527 	 *
528 	 */
529 	struct {
530 		struct rb_node rb;
531 		unsigned long rb_subtree_last;
532 	} shared;
533 
534 	/*
535 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
536 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
537 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
538 	 * or brk vma (with NULL file) can only be in an anon_vma list.
539 	 */
540 	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
541 					  * page_table_lock */
542 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
543 
544 	/* Function pointers to deal with this struct. */
545 	const struct vm_operations_struct *vm_ops;
546 
547 	/* Information about our backing store: */
548 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
549 					   units */
550 	struct file * vm_file;		/* File we map to (can be NULL). */
551 	void * vm_private_data;		/* was vm_pte (shared mem) */
552 
553 #ifdef CONFIG_ANON_VMA_NAME
554 	/*
555 	 * For private and shared anonymous mappings, a pointer to a null
556 	 * terminated string containing the name given to the vma, or NULL if
557 	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
558 	 */
559 	struct anon_vma_name *anon_name;
560 #endif
561 #ifdef CONFIG_SWAP
562 	atomic_long_t swap_readahead_info;
563 #endif
564 #ifndef CONFIG_MMU
565 	struct vm_region *vm_region;	/* NOMMU mapping region */
566 #endif
567 #ifdef CONFIG_NUMA
568 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
569 #endif
570 #ifdef CONFIG_NUMA_BALANCING
571 	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
572 #endif
573 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
574 } __randomize_layout;
575 
576 struct kioctx_table;
577 struct mm_struct {
578 	struct {
579 		struct maple_tree mm_mt;
580 #ifdef CONFIG_MMU
581 		unsigned long (*get_unmapped_area) (struct file *filp,
582 				unsigned long addr, unsigned long len,
583 				unsigned long pgoff, unsigned long flags);
584 #endif
585 		unsigned long mmap_base;	/* base of mmap area */
586 		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
587 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
588 		/* Base addresses for compatible mmap() */
589 		unsigned long mmap_compat_base;
590 		unsigned long mmap_compat_legacy_base;
591 #endif
592 		unsigned long task_size;	/* size of task vm space */
593 		pgd_t * pgd;
594 
595 #ifdef CONFIG_MEMBARRIER
596 		/**
597 		 * @membarrier_state: Flags controlling membarrier behavior.
598 		 *
599 		 * This field is close to @pgd to hopefully fit in the same
600 		 * cache-line, which needs to be touched by switch_mm().
601 		 */
602 		atomic_t membarrier_state;
603 #endif
604 
605 		/**
606 		 * @mm_users: The number of users including userspace.
607 		 *
608 		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
609 		 * drops to 0 (i.e. when the task exits and there are no other
610 		 * temporary reference holders), we also release a reference on
611 		 * @mm_count (which may then free the &struct mm_struct if
612 		 * @mm_count also drops to 0).
613 		 */
614 		atomic_t mm_users;
615 
616 		/**
617 		 * @mm_count: The number of references to &struct mm_struct
618 		 * (@mm_users count as 1).
619 		 *
620 		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
621 		 * &struct mm_struct is freed.
622 		 */
623 		atomic_t mm_count;
624 #ifdef CONFIG_SCHED_MM_CID
625 		/**
626 		 * @cid_lock: Protect cid bitmap updates vs lookups.
627 		 *
628 		 * Prevent situations where updates to the cid bitmap happen
629 		 * concurrently with lookups. Those can lead to situations
630 		 * where a lookup cannot find a free bit simply because it was
631 		 * unlucky enough to load, non-atomically, bitmap words as they
632 		 * were being concurrently updated by the updaters.
633 		 */
634 		raw_spinlock_t cid_lock;
635 #endif
636 #ifdef CONFIG_MMU
637 		atomic_long_t pgtables_bytes;	/* size of all page tables */
638 #endif
639 		int map_count;			/* number of VMAs */
640 
641 		spinlock_t page_table_lock; /* Protects page tables and some
642 					     * counters
643 					     */
644 		/*
645 		 * With some kernel configs, the current mmap_lock offset
646 		 * inside 'mm_struct' is 0x120, which is optimal: its two hot
647 		 * fields 'count' and 'owner' sit in two different cachelines,
648 		 * and when mmap_lock is highly contended both fields are
649 		 * accessed frequently, so the current layout helps to reduce
650 		 * cache bouncing.
651 		 *
652 		 * So please be careful when adding new fields before
653 		 * mmap_lock, which can easily push the two fields into one
654 		 * cacheline.
655 		 */
656 		struct rw_semaphore mmap_lock;
657 
658 		struct list_head mmlist; /* List of maybe swapped mm's.	These
659 					  * are globally strung together off
660 					  * init_mm.mmlist, and are protected
661 					  * by mmlist_lock
662 					  */
663 #ifdef CONFIG_PER_VMA_LOCK
664 		int mm_lock_seq;
665 #endif
666 
667 
668 		unsigned long hiwater_rss; /* High-watermark of RSS usage */
669 		unsigned long hiwater_vm;  /* High-water virtual memory usage */
670 
671 		unsigned long total_vm;	   /* Total pages mapped */
672 		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
673 		atomic64_t    pinned_vm;   /* Refcount permanently increased */
674 		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
675 		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
676 		unsigned long stack_vm;	   /* VM_STACK */
677 		unsigned long def_flags;
678 
679 		/**
680 		 * @write_protect_seq: Locked when any thread is write
681 		 * protecting pages mapped by this mm to enforce a later COW,
682 		 * for instance during page table copying for fork().
683 		 */
684 		seqcount_t write_protect_seq;
685 
686 		spinlock_t arg_lock; /* protect the below fields */
687 
688 		unsigned long start_code, end_code, start_data, end_data;
689 		unsigned long start_brk, brk, start_stack;
690 		unsigned long arg_start, arg_end, env_start, env_end;
691 
692 		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
693 
694 		struct percpu_counter rss_stat[NR_MM_COUNTERS];
695 
696 		struct linux_binfmt *binfmt;
697 
698 		/* Architecture-specific MM context */
699 		mm_context_t context;
700 
701 		unsigned long flags; /* Must use atomic bitops to access */
702 
703 #ifdef CONFIG_AIO
704 		spinlock_t			ioctx_lock;
705 		struct kioctx_table __rcu	*ioctx_table;
706 #endif
707 #ifdef CONFIG_MEMCG
708 		/*
709 		 * "owner" points to a task that is regarded as the canonical
710 		 * user/owner of this mm. All of the following must be true in
711 		 * order for it to be changed:
712 		 *
713 		 * current == mm->owner
714 		 * current->mm != mm
715 		 * new_owner->mm == mm
716 		 * new_owner->alloc_lock is held
717 		 */
718 		struct task_struct __rcu *owner;
719 #endif
720 		struct user_namespace *user_ns;
721 
722 		/* store ref to file /proc/<pid>/exe symlink points to */
723 		struct file __rcu *exe_file;
724 #ifdef CONFIG_MMU_NOTIFIER
725 		struct mmu_notifier_subscriptions *notifier_subscriptions;
726 #endif
727 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
728 		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
729 #endif
730 #ifdef CONFIG_NUMA_BALANCING
731 		/*
732 		 * numa_next_scan is the next time that PTEs will be remapped
733 		 * PROT_NONE to trigger NUMA hinting faults; such faults gather
734 		 * statistics and migrate pages to new nodes if necessary.
735 		 */
736 		unsigned long numa_next_scan;
737 
738 		/* Restart point for scanning and remapping PTEs. */
739 		unsigned long numa_scan_offset;
740 
741 		/* numa_scan_seq prevents two threads remapping PTEs. */
742 		int numa_scan_seq;
743 #endif
744 		/*
745 		 * An operation with batched TLB flushing is going on. Anything
746 		 * that can move process memory needs to flush the TLB when
747 		 * moving a PROT_NONE mapped page.
748 		 */
749 		atomic_t tlb_flush_pending;
750 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
751 		/* See flush_tlb_batched_pending() */
752 		atomic_t tlb_flush_batched;
753 #endif
754 		struct uprobes_state uprobes_state;
755 #ifdef CONFIG_PREEMPT_RT
756 		struct rcu_head delayed_drop;
757 #endif
758 #ifdef CONFIG_HUGETLB_PAGE
759 		atomic_long_t hugetlb_usage;
760 #endif
761 		struct work_struct async_put_work;
762 
763 #ifdef CONFIG_IOMMU_SVA
764 		u32 pasid;
765 #endif
766 #ifdef CONFIG_KSM
767 		/*
768 		 * Represents how many pages of this process are involved in KSM
769 		 * merging.
770 		 */
771 		unsigned long ksm_merging_pages;
772 		/*
773 		 * Represents how many pages have been checked for KSM merging,
774 		 * including both merged and unmerged pages.
775 		 */
776 		unsigned long ksm_rmap_items;
777 #endif
778 #ifdef CONFIG_LRU_GEN
779 		struct {
780 			/* this mm_struct is on lru_gen_mm_list */
781 			struct list_head list;
782 			/*
783 			 * Set when switching to this mm_struct, as a hint of
784 			 * whether it has been used since the last time per-node
785 			 * page table walkers cleared the corresponding bits.
786 			 */
787 			unsigned long bitmap;
788 #ifdef CONFIG_MEMCG
789 			/* points to the memcg of "owner" above */
790 			struct mem_cgroup *memcg;
791 #endif
792 		} lru_gen;
793 #endif /* CONFIG_LRU_GEN */
794 	} __randomize_layout;
795 
796 	/*
797 	 * The mm_cpumask needs to be at the end of mm_struct, because it
798 	 * is dynamically sized based on nr_cpu_ids.
799 	 */
800 	unsigned long cpu_bitmap[];
801 };
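/*
 * Example of the @mm_users vs. @mm_count rules documented above
 * (illustrative sketch; mmgrab(), mmdrop(), mmget_not_zero() and mmput()
 * are declared in linux/sched/mm.h):
 *
 *	mmgrab(mm);			// keep the mm_struct itself alive
 *	if (mmget_not_zero(mm)) {	// also pin the address space
 *		... walk VMAs, touch page tables ...
 *		mmput(mm);
 *	}
 *	mmdrop(mm);
 */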
802 
803 #define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
804 			 MT_FLAGS_USE_RCU)
805 extern struct mm_struct init_mm;
806 
807 /* Pointer magic because the dynamic array size confuses some compilers. */
808 static inline void mm_init_cpumask(struct mm_struct *mm)
809 {
810 	unsigned long cpu_bitmap = (unsigned long)mm;
811 
812 	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
813 	cpumask_clear((struct cpumask *)cpu_bitmap);
814 }
815 
816 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
817 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
818 {
819 	return (struct cpumask *)&mm->cpu_bitmap;
820 }
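/*
 * Example (illustrative sketch): architectures typically mark and test CPUs
 * in this mask from their context-switch and TLB code, e.g.:
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		flush_tlb_mm(mm);	// flush_tlb_mm() comes from asm/tlbflush.h
 */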
821 
822 #ifdef CONFIG_LRU_GEN
823 
824 struct lru_gen_mm_list {
825 	/* mm_struct list for page table walkers */
826 	struct list_head fifo;
827 	/* protects the list above */
828 	spinlock_t lock;
829 };
830 
831 void lru_gen_add_mm(struct mm_struct *mm);
832 void lru_gen_del_mm(struct mm_struct *mm);
833 #ifdef CONFIG_MEMCG
834 void lru_gen_migrate_mm(struct mm_struct *mm);
835 #endif
836 
837 static inline void lru_gen_init_mm(struct mm_struct *mm)
838 {
839 	INIT_LIST_HEAD(&mm->lru_gen.list);
840 	mm->lru_gen.bitmap = 0;
841 #ifdef CONFIG_MEMCG
842 	mm->lru_gen.memcg = NULL;
843 #endif
844 }
845 
846 static inline void lru_gen_use_mm(struct mm_struct *mm)
847 {
848 	/*
849 	 * When the bitmap is set, page reclaim knows this mm_struct has been
850 	 * used since the last time it cleared the bitmap. So it might be worth
851 	 * walking the page tables of this mm_struct to clear the accessed bit.
852 	 */
853 	WRITE_ONCE(mm->lru_gen.bitmap, -1);
854 }
855 
856 #else /* !CONFIG_LRU_GEN */
857 
858 static inline void lru_gen_add_mm(struct mm_struct *mm)
859 {
860 }
861 
862 static inline void lru_gen_del_mm(struct mm_struct *mm)
863 {
864 }
865 
866 #ifdef CONFIG_MEMCG
867 static inline void lru_gen_migrate_mm(struct mm_struct *mm)
868 {
869 }
870 #endif
871 
872 static inline void lru_gen_init_mm(struct mm_struct *mm)
873 {
874 }
875 
876 static inline void lru_gen_use_mm(struct mm_struct *mm)
877 {
878 }
879 
880 #endif /* CONFIG_LRU_GEN */
881 
882 struct vma_iterator {
883 	struct ma_state mas;
884 };
885 
886 #define VMA_ITERATOR(name, __mm, __addr)				\
887 	struct vma_iterator name = {					\
888 		.mas = {						\
889 			.tree = &(__mm)->mm_mt,				\
890 			.index = __addr,				\
891 			.node = MAS_START,				\
892 		},							\
893 	}
894 
895 static inline void vma_iter_init(struct vma_iterator *vmi,
896 		struct mm_struct *mm, unsigned long addr)
897 {
898 	mas_init(&vmi->mas, &mm->mm_mt, addr);
899 }
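/*
 * Example walk over all VMAs of an mm (illustrative sketch; for_each_vma()
 * and mmap_read_lock()/mmap_read_unlock() live in linux/mm.h and
 * linux/mmap_lock.h, not in this header):
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */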
900 
901 #ifdef CONFIG_SCHED_MM_CID
902 /* Accessor for struct mm_struct's cidmask. */
903 static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
904 {
905 	unsigned long cid_bitmap = (unsigned long)mm;
906 
907 	cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
908 	/* Skip cpu_bitmap */
909 	cid_bitmap += cpumask_size();
910 	return (struct cpumask *)cid_bitmap;
911 }
912 
913 static inline void mm_init_cid(struct mm_struct *mm)
914 {
915 	raw_spin_lock_init(&mm->cid_lock);
916 	cpumask_clear(mm_cidmask(mm));
917 }
918 
919 static inline unsigned int mm_cid_size(void)
920 {
921 	return cpumask_size();
922 }
923 #else /* CONFIG_SCHED_MM_CID */
924 static inline void mm_init_cid(struct mm_struct *mm) { }
925 static inline unsigned int mm_cid_size(void)
926 {
927 	return 0;
928 }
929 #endif /* CONFIG_SCHED_MM_CID */
930 
931 struct mmu_gather;
932 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
933 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
934 extern void tlb_finish_mmu(struct mmu_gather *tlb);
935 
936 struct vm_fault;
937 
938 /**
939  * typedef vm_fault_t - Return type for page fault handlers.
940  *
941  * Page fault handlers return a bitmask of %VM_FAULT values.
942  */
943 typedef __bitwise unsigned int vm_fault_t;
944 
945 /**
946  * enum vm_fault_reason - Page fault handlers return a bitmask of
947  * these values to tell the core VM what happened when handling the
948  * fault. Used to decide whether a process gets delivered SIGBUS or
949  * just gets major/minor fault counters bumped up.
950  *
951  * @VM_FAULT_OOM:		Out Of Memory
952  * @VM_FAULT_SIGBUS:		Bad access
953  * @VM_FAULT_MAJOR:		Page read from storage
954  * @VM_FAULT_HWPOISON:		Hit poisoned small page
955  * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
956  *				in upper bits
957  * @VM_FAULT_SIGSEGV:		segmentation fault
958  * @VM_FAULT_NOPAGE:		->fault installed the pte, does not return a page
959  * @VM_FAULT_LOCKED:		->fault locked the returned page
960  * @VM_FAULT_RETRY:		->fault blocked, must retry
961  * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
962  * @VM_FAULT_DONE_COW:		->fault has fully handled COW
963  * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
964  *				fsync() to complete (for synchronous page faults
965  *				in DAX)
966  * @VM_FAULT_COMPLETED:		->fault completed, meanwhile mmap lock released
967  * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
968  *
969  */
970 enum vm_fault_reason {
971 	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
972 	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
973 	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
974 	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
975 	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
976 	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
977 	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
978 	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
979 	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
980 	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
981 	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
982 	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
983 	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
984 	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
985 };
986 
987 /* Encode hstate index for a hwpoisoned large page */
988 #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
989 #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
990 
991 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
992 			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
993 			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
994 
995 #define VM_FAULT_RESULT_TRACE \
996 	{ VM_FAULT_OOM,                 "OOM" },	\
997 	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
998 	{ VM_FAULT_MAJOR,               "MAJOR" },	\
999 	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
1000 	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
1001 	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
1002 	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
1003 	{ VM_FAULT_LOCKED,              "LOCKED" },	\
1004 	{ VM_FAULT_RETRY,               "RETRY" },	\
1005 	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
1006 	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
1007 	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }
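/*
 * Example of how a ->fault handler typically reports these codes
 * (illustrative sketch; my_lookup_page() is a made-up helper and
 * struct vm_fault is defined in linux/mm.h):
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;	// or VM_FAULT_LOCKED if the page was locked
 *	}
 */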
1008 
1009 struct vm_special_mapping {
1010 	const char *name;	/* The name, e.g. "[vdso]". */
1011 
1012 	/*
1013 	 * If .fault is not provided, this points to a
1014 	 * NULL-terminated array of pages that back the special mapping.
1015 	 *
1016 	 * This must not be NULL unless .fault is provided.
1017 	 */
1018 	struct page **pages;
1019 
1020 	/*
1021 	 * If non-NULL, then this is called to resolve page faults
1022 	 * on the special mapping.  If used, .pages is not checked.
1023 	 */
1024 	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
1025 				struct vm_area_struct *vma,
1026 				struct vm_fault *vmf);
1027 
1028 	int (*mremap)(const struct vm_special_mapping *sm,
1029 		     struct vm_area_struct *new_vma);
1030 };
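/*
 * Example declaration (illustrative sketch; the vdso naming is only for
 * flavour, and _install_special_mapping() is declared in linux/mm.h):
 *
 *	static struct page *vdso_pages[] = { ..., NULL };
 *
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name	= "[vdso]",
 *		.pages	= vdso_pages,
 *	};
 *
 * which is later installed into an mm with _install_special_mapping().
 */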
1031 
1032 enum tlb_flush_reason {
1033 	TLB_FLUSH_ON_TASK_SWITCH,
1034 	TLB_REMOTE_SHOOTDOWN,
1035 	TLB_LOCAL_SHOOTDOWN,
1036 	TLB_LOCAL_MM_SHOOTDOWN,
1037 	TLB_REMOTE_SEND_IPI,
1038 	NR_TLB_FLUSH_REASONS,
1039 };
1040 
1041  /*
1042   * A swap entry has to fit into a "unsigned long", as the entry is hidden
1043   * in the "index" field of the swapper address space.
1044   */
1045 typedef struct {
1046 	unsigned long val;
1047 } swp_entry_t;
1048 
1049 /**
1050  * enum fault_flag - Fault flag definitions.
1051  * @FAULT_FLAG_WRITE: Fault was a write fault.
1052  * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
1053  * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
1054  * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
1055  * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
1056  * @FAULT_FLAG_TRIED: The fault has been tried once.
1057  * @FAULT_FLAG_USER: The fault originated in userspace.
1058  * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
1059  * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
1060  * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
1061  * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
1062  *                      COW mapping, making sure that an exclusive anon page is
1063  *                      mapped after the fault.
1064  * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
1065  *                        We should only access orig_pte if this flag is set.
1066  * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
1067  *
1068  * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
1069  * whether we would allow page faults to retry by specifying these two
1070  * fault flags correctly.  Currently there can be three legal combinations:
1071  *
1072  * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
1073  *                              this is the first try
1074  *
1075  * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
1076  *                              we've already tried at least once
1077  *
1078  * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
1079  *
1080  * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
1081  * be used.  Note that page faults can be allowed to retry for multiple times,
1082  * in which case we'll have an initial fault with flags (a) then later on
1083  * continuous faults with flags (b).  We should always try to detect pending
1084  * signals before a retry to make sure the continuous page faults can still be
1085  * interrupted if necessary.
1086  *
1087  * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
1088  * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
1089  * applied to mappings that are not COW mappings.
1090  */
1091 enum fault_flag {
1092 	FAULT_FLAG_WRITE =		1 << 0,
1093 	FAULT_FLAG_MKWRITE =		1 << 1,
1094 	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
1095 	FAULT_FLAG_RETRY_NOWAIT = 	1 << 3,
1096 	FAULT_FLAG_KILLABLE =		1 << 4,
1097 	FAULT_FLAG_TRIED = 		1 << 5,
1098 	FAULT_FLAG_USER =		1 << 6,
1099 	FAULT_FLAG_REMOTE =		1 << 7,
1100 	FAULT_FLAG_INSTRUCTION =	1 << 8,
1101 	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
1102 	FAULT_FLAG_UNSHARE =		1 << 10,
1103 	FAULT_FLAG_ORIG_PTE_VALID =	1 << 11,
1104 	FAULT_FLAG_VMA_LOCK =		1 << 12,
1105 };
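/*
 * Example of acting on the retry combinations described above
 * (illustrative sketch of an arch fault-handler loop; "fault" is the
 * vm_fault_t returned by handle_mm_fault() and "flags" is the bitmask
 * passed to it):
 *
 *	if ((fault & VM_FAULT_RETRY) &&
 *	    (flags & FAULT_FLAG_ALLOW_RETRY) &&
 *	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
 *		flags |= FAULT_FLAG_TRIED;
 *		goto retry;	// mmap_lock was dropped by the fault path
 *	}
 */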
1106 
1107 typedef unsigned int __bitwise zap_flags_t;
1108 
1109 /*
1110  * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
1111  * other. Here is what they mean, and how to use them:
1112  *
1113  *
1114  * FIXME: For pages which are part of a filesystem, mappings are subject to the
1115  * lifetime enforced by the filesystem and we need guarantees that longterm
1116  * users like RDMA and V4L2 only establish mappings which coordinate usage with
1117  * the filesystem.  Ideas for this coordination include revoking the longterm
1118  * pin, delaying writeback, bounce buffer page writeback, etc.  As FS DAX was
1119  * added after the problem with filesystems was found, FS DAX VMAs are
1120  * specifically failed.  Filesystem pages are still subject to bugs and use of
1121  * FOLL_LONGTERM should be avoided on those pages.
1122  *
1123  * In the CMA case: long term pins in a CMA region would unnecessarily fragment
1124  * that region.  And so, CMA attempts to migrate the page before pinning, when
1125  * FOLL_LONGTERM is specified.
1126  *
1127  * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
1128  * but an additional pin counting system) will be invoked. This is intended for
1129  * anything that gets a page reference and then touches page data (for example,
1130  * Direct IO). This lets the filesystem know that some non-file-system entity is
1131  * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
1132  * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
1133  * a call to unpin_user_page().
1134  *
1135  * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
1136  * and separate refcounting mechanisms, however, and that means that each has
1137  * its own acquire and release mechanisms:
1138  *
1139  *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
1140  *
1141  *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages() to release.
1142  *
1143  * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
1144  * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
1145  * calls applied to them, and that's perfectly OK. This is a constraint on the
1146  * callers, not on the pages.)
1147  *
1148  * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
1149  * directly by the caller. That's in order to help avoid mismatches when
1150  * releasing pages: get_user_pages*() pages must be released via put_page(),
1151  * while pin_user_pages*() pages must be released via unpin_user_page().
1152  *
1153  * Please see Documentation/core-api/pin_user_pages.rst for more information.
1154  */
1155 
1156 enum {
1157 	/* check pte is writable */
1158 	FOLL_WRITE = 1 << 0,
1159 	/* do get_page on page */
1160 	FOLL_GET = 1 << 1,
1161 	/* give error on hole if it would be zero */
1162 	FOLL_DUMP = 1 << 2,
1163 	/* get_user_pages read/write w/o permission */
1164 	FOLL_FORCE = 1 << 3,
1165 	/*
1166 	 * if a disk transfer is needed, start the IO and return without waiting
1167 	 * upon it
1168 	 */
1169 	FOLL_NOWAIT = 1 << 4,
1170 	/* do not fault in pages */
1171 	FOLL_NOFAULT = 1 << 5,
1172 	/* check page is hwpoisoned */
1173 	FOLL_HWPOISON = 1 << 6,
1174 	/* don't do file mappings */
1175 	FOLL_ANON = 1 << 7,
1176 	/*
1177 	 * FOLL_LONGTERM indicates that the page will be held for an indefinite
1178 	 * time period _often_ under userspace control.  This is in contrast to
1179 	 * iov_iter_get_pages(), whose usages are transient.
1180 	 */
1181 	FOLL_LONGTERM = 1 << 8,
1182 	/* split huge pmd before returning */
1183 	FOLL_SPLIT_PMD = 1 << 9,
1184 	/* allow returning PCI P2PDMA pages */
1185 	FOLL_PCI_P2PDMA = 1 << 10,
1186 	/* allow interrupts from generic signals */
1187 	FOLL_INTERRUPTIBLE = 1 << 11,
1188 
1189 	/* See also internal only FOLL flags in mm/internal.h */
1190 };
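/*
 * Example of the FOLL_GET vs. FOLL_PIN pairing described above
 * (illustrative sketch; both APIs are declared in linux/mm.h):
 *
 *	// FOLL_GET flavour: released with put_page()
 *	ret = get_user_pages_fast(addr, 1, FOLL_WRITE, &page);
 *	...
 *	put_page(page);
 *
 *	// FOLL_PIN flavour: FOLL_PIN is set internally by the API,
 *	// released with unpin_user_page()
 *	ret = pin_user_pages_fast(addr, 1, FOLL_WRITE | FOLL_LONGTERM, &page);
 *	...
 *	unpin_user_page(page);
 */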
1191 
1192 #endif /* _LINUX_MM_TYPES_H */
1193