/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
 * That requires that freelist & counters in struct slab be adjacent and
 * double-word aligned. Because struct slab currently just reinterprets the
 * bits of struct page, we align all struct pages to double-word boundaries,
 * and ensure that 'freelist' is aligned within struct slab.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment	__aligned(sizeof(unsigned long))
#endif

struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			union {
				struct list_head lru;

				/* Or, for the Unevictable "LRU list" slot */
				struct {
					/* Always even, to negate PageTail */
					void *__filler;
					/* Count page's or folio's mlocks */
					unsigned int mlock_count;
				};

				/* Or, free page */
				struct list_head buddy_list;
				struct list_head pcp_list;
			};
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			union {
				pgoff_t index;		/* Our offset within mapping. */
				unsigned long share;	/* share count for fsdax */
			};
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling non
			 * page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support; not supported on
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMSAN
	/*
	 * KMSAN metadata for this page:
	 *  - shadow page: every bit indicates whether the corresponding
	 *    bit of the original page is initialized (0) or not (1);
	 *  - origin page: every 4 bytes contain an id of the stack trace
	 *    where the uninitialized value was created.
	 */
	struct page *kmsan_shadow;
	struct page *kmsan_origin;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;
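
/*
 * Illustrative sketch: per the rules in the comment above struct page, an
 * owner of pages from alloc_pages() may reuse the words of the main union,
 * e.g. @lru as a generic list ("my_list" is a hypothetical caller-owned
 * list head):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (page) {
 *		list_add(&page->lru, &my_list);
 *		...
 *		list_del(&page->lru);
 *		__free_pages(page, 0);
 *	}
 */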

/*
 * struct encoded_page - a nonexistent type marking this pointer
 *
 * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
 * with the low bits of the pointer indicating extra context-dependent
 * information. Not super-common, but happens in mmu_gather and mlock
 * handling, and this acts as a type system check on that use.
 *
 * We only really have two guaranteed bits in general, although you could
 * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 * for more.
 *
 * Use the supplied helper functions to encode/decode the pointer and bits.
 */
struct encoded_page;
#define ENCODE_PAGE_BITS 3ul
static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
{
	BUILD_BUG_ON(flags > ENCODE_PAGE_BITS);
	return (struct encoded_page *)(flags | (unsigned long)page);
}

static inline unsigned long encoded_page_flags(struct encoded_page *page)
{
	return ENCODE_PAGE_BITS & (unsigned long)page;
}

static inline struct page *encoded_page_ptr(struct encoded_page *page)
{
	return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page);
}
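
/*
 * Illustrative sketch: ENCODE_PAGE_BITS is 3, so up to two low bits ride
 * along with the pointer and round-trip through the helpers above:
 *
 *	struct encoded_page *ep = encode_page(page, 1);
 *	unsigned long flags = encoded_page_flags(ep);	// == 1
 *	struct page *p = encoded_page_ptr(ep);		// == page
 */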

/**
 * struct folio - Represents a contiguous set of bytes.
 * @flags: Identical to the page flags.
 * @lru: Least Recently Used list; tracks how recently this folio was used.
 * @mlock_count: Number of times this folio has been pinned by mlock().
 * @mapping: The file this page belongs to, or refers to the anon_vma for
 *    anonymous memory.
 * @index: Offset within the file, in units of pages.  For anonymous memory,
 *    this is the index from the beginning of the mmap.
 * @private: Filesystem per-folio data (see folio_attach_private()).
 *    Used for swp_entry_t if folio_test_swapcache().
 * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
 *    find out how many times this folio is mapped by userspace.
 * @_refcount: Do not access this member directly.  Use folio_ref_count()
 *    to find how many references there are to this folio.
 * @memcg_data: Memory Control Group data.
 * @_folio_dtor: Which destructor to use for this folio.
 * @_folio_order: Do not use directly, call folio_order().
 * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
 * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
 * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
 * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
 * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
 * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
 * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
 * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
 * @_deferred_list: Folios to be split under memory pressure.
 *
 * A folio is a physically, virtually and logically contiguous set
 * of bytes.  It is a power-of-two in size, and it is aligned to that
 * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
 * in the page cache, it is at a file offset which is a multiple of that
 * power-of-two.  It may be mapped into userspace at an address which is
 * at an arbitrary page offset, but its kernel virtual address is aligned
 * to its size.
 */
struct folio {
	/* private: don't document the anon union */
	union {
		struct {
	/* public: */
			unsigned long flags;
			union {
				struct list_head lru;
	/* private: avoid cluttering the output */
				struct {
					void *__filler;
	/* public: */
					unsigned int mlock_count;
	/* private: */
				};
	/* public: */
			};
			struct address_space *mapping;
			pgoff_t index;
			void *private;
			atomic_t _mapcount;
			atomic_t _refcount;
#ifdef CONFIG_MEMCG
			unsigned long memcg_data;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page page;
	};
	union {
		struct {
			unsigned long _flags_1;
			unsigned long _head_1;
	/* public: */
			unsigned char _folio_dtor;
			unsigned char _folio_order;
			atomic_t _entire_mapcount;
			atomic_t _nr_pages_mapped;
			atomic_t _pincount;
#ifdef CONFIG_64BIT
			unsigned int _folio_nr_pages;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page __page_1;
	};
	union {
		struct {
			unsigned long _flags_2;
			unsigned long _head_2;
	/* public: */
			void *_hugetlb_subpool;
			void *_hugetlb_cgroup;
			void *_hugetlb_cgroup_rsvd;
			void *_hugetlb_hwpoison;
	/* private: the union with struct page is transitional */
		};
		struct {
			unsigned long _flags_2a;
			unsigned long _head_2a;
	/* public: */
			struct list_head _deferred_list;
	/* private: the union with struct page is transitional */
		};
		struct page __page_2;
	};
};

#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct folio, fl) ==			\
			offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct folio, fl) ==			\
			offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
#undef FOLIO_MATCH
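
/*
 * Illustrative sketch: the asserts above pin the overlay down; in a large
 * folio, _flags_1 and _head_1 alias the flags and compound_head words of
 * the first tail page:
 *
 *	struct page *tail = &folio->page + 1;
 *	// &tail->flags aliases &folio->_flags_1
 *	// tail->compound_head aliases folio->_head_1
 */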

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

/*
 * page_private can be used on tail pages.  However, PagePrivate is only
 * checked by the VM on the head page.  So page_private on the tail pages
 * should be used for data that's ancillary to the head page (eg attaching
 * buffer heads to tail pages after attaching buffer heads to the head page)
 */
#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}

static inline void *folio_get_private(struct folio *folio)
{
	return folio->private;
}
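
/*
 * Illustrative sketch: the raw accessors pair up ("my_data" is a
 * hypothetical per-page cookie; filesystems normally go through
 * folio_attach_private() and friends instead):
 *
 *	set_page_private(page, (unsigned long)my_data);
 *	my_data = (void *)page_private(page);
 */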

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* We maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a
	 * fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_lock {
	struct rw_semaphore lock;
};

struct vma_numab_state {
	unsigned long next_scan;
	unsigned long next_pid_reset;
	unsigned long access_pids[2];
};

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_lock->lock (in write mode)
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_lock->lock (in read or write mode)
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	int vm_lock_seq;
	struct vma_lock *vm_lock;

	/* Flag to indicate areas detached from the mm->mm_mt tree */
	bool detached;
#endif

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

#ifdef CONFIG_SCHED_MM_CID
struct mm_cid {
	u64 time;
	int cid;
};
#endif

struct kioctx_table;
struct mm_struct {
	struct {
		/*
		 * Fields which are often written to are placed in a separate
		 * cache line.
		 */
		struct {
			/**
			 * @mm_count: The number of references to &struct
			 * mm_struct (@mm_users count as 1).
			 *
			 * Use mmgrab()/mmdrop() to modify. When this drops to
			 * 0, the &struct mm_struct is freed.
			 */
			atomic_t mm_count;
		} ____cacheline_aligned_in_smp;

		struct maple_tree mm_mt;
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		pgd_t *pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

#ifdef CONFIG_SCHED_MM_CID
		/**
		 * @pcpu_cid: Per-cpu current cid.
		 *
		 * Keep track of the currently allocated mm_cid for each cpu.
		 * The per-cpu mm_cid values are serialized by their respective
		 * runqueue locks.
		 */
		struct mm_cid __percpu *pcpu_cid;
		/*
		 * @mm_cid_next_scan: When the next mm_cid scan is due
		 * (in jiffies).
		 */
		unsigned long mm_cid_next_scan;
#endif
#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* size of all page tables */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel configs, the current mmap_lock's offset
		 * inside 'mm_struct' is 0x120, which is very optimal, as its
		 * two hot fields 'count' and 'owner' sit in two different
		 * cachelines; when mmap_lock is highly contended, both fields
		 * are accessed frequently, so the current layout helps reduce
		 * cache bouncing.
		 *
		 * So please be careful about adding new fields before
		 * mmap_lock, which can easily push the two fields into one
		 * cacheline.
		 */
		struct rw_semaphore mmap_lock;

		struct list_head mmlist; /* List of maybe swapped mm's.	These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */
#ifdef CONFIG_PER_VMA_LOCK
		/*
		 * This field has lock-like semantics, meaning it is sometimes
		 * accessed with ACQUIRE/RELEASE semantics.
		 * Roughly speaking, incrementing the sequence number is
		 * equivalent to releasing locks on VMAs; reading the sequence
		 * number can be part of taking a read lock on a VMA.
		 *
		 * Can be modified under write mmap_lock using RELEASE
		 * semantics.
		 * Can be read with no other protection when holding write
		 * mmap_lock.
		 * Can be read with ACQUIRE semantics if not holding write
		 * mmap_lock.
		 */
		int mm_lock_seq;
#endif

		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		struct percpu_counter rss_stat[NR_MM_COUNTERS];

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that PTEs will be remapped
		 * PROT_NONE to trigger NUMA hinting faults; such faults gather
		 * statistics and migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and remapping PTEs. */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads remapping PTEs. */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		atomic_t tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
		struct rcu_head delayed_drop;
#endif
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SVA
		u32 pasid;
#endif
#ifdef CONFIG_KSM
		/*
		 * Represents how many pages of this process are involved in
		 * KSM merging.
		 */
		unsigned long ksm_merging_pages;
		/*
		 * Represents how many pages are checked for KSM merging,
		 * including those merged and not merged.
		 */
		unsigned long ksm_rmap_items;
#endif
#ifdef CONFIG_LRU_GEN
		struct {
			/* this mm_struct is on lru_gen_mm_list */
			struct list_head list;
			/*
			 * Set when switching to this mm_struct, as a hint of
			 * whether it has been used since the last time per-node
			 * page table walkers cleared the corresponding bits.
			 */
			unsigned long bitmap;
#ifdef CONFIG_MEMCG
			/* points to the memcg of "owner" above */
			struct mem_cgroup *memcg;
#endif
		} lru_gen;
#endif /* CONFIG_LRU_GEN */
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};

#define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
			 MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
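
/*
 * Illustrative sketch: architectures typically mark and test CPUs in this
 * mask around context switch and TLB shootdown, e.g.
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		...;	// this CPU may hold stale TLB entries for mm
 */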

#ifdef CONFIG_LRU_GEN

struct lru_gen_mm_list {
	/* mm_struct list for page table walkers */
	struct list_head fifo;
	/* protects the list above */
	spinlock_t lock;
};

void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
#endif

static inline void lru_gen_init_mm(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->lru_gen.list);
	mm->lru_gen.bitmap = 0;
#ifdef CONFIG_MEMCG
	mm->lru_gen.memcg = NULL;
#endif
}

static inline void lru_gen_use_mm(struct mm_struct *mm)
{
	/*
	 * When the bitmap is set, page reclaim knows this mm_struct has been
	 * used since the last time it cleared the bitmap. So it might be worth
	 * walking the page tables of this mm_struct to clear the accessed bit.
	 */
	WRITE_ONCE(mm->lru_gen.bitmap, -1);
}

#else /* !CONFIG_LRU_GEN */

static inline void lru_gen_add_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}

#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
#endif

static inline void lru_gen_init_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}

#endif /* CONFIG_LRU_GEN */

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = MAS_START,				\
		},							\
	}

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}
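
/*
 * Illustrative sketch (using the maple tree's mas_find(); callers normally
 * use the for_each_vma() helpers built on top of this iterator):
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	while ((vma = mas_find(&vmi.mas, ULONG_MAX)) != NULL)
 *		...;
 *	mmap_read_unlock(mm);
 */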

#ifdef CONFIG_SCHED_MM_CID

enum mm_cid_state {
	MM_CID_UNSET = -1U,		/* Unset state has lazy_put flag set. */
	MM_CID_LAZY_PUT = (1U << 31),
};

static inline bool mm_cid_is_unset(int cid)
{
	return cid == MM_CID_UNSET;
}

static inline bool mm_cid_is_lazy_put(int cid)
{
	return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
}

static inline bool mm_cid_is_valid(int cid)
{
	return !(cid & MM_CID_LAZY_PUT);
}

static inline int mm_cid_set_lazy_put(int cid)
{
	return cid | MM_CID_LAZY_PUT;
}

static inline int mm_cid_clear_lazy_put(int cid)
{
	return cid & ~MM_CID_LAZY_PUT;
}
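
/*
 * Illustrative sketch: MM_CID_LAZY_PUT lives in bit 31, so the transitions
 * above are plain bit operations:
 *
 *	int cid = 5;
 *
 *	cid = mm_cid_set_lazy_put(cid);		// 5 | (1U << 31)
 *	mm_cid_is_lazy_put(cid);		// true
 *	cid = mm_cid_clear_lazy_put(cid);	// 5 again
 */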

/* Accessor for struct mm_struct's cidmask. */
static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
{
	unsigned long cid_bitmap = (unsigned long)mm;

	cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	/* Skip cpu_bitmap */
	cid_bitmap += cpumask_size();
	return (struct cpumask *)cid_bitmap;
}

static inline void mm_init_cid(struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i) {
		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);

		pcpu_cid->cid = MM_CID_UNSET;
		pcpu_cid->time = 0;
	}
	cpumask_clear(mm_cidmask(mm));
}

static inline int mm_alloc_cid(struct mm_struct *mm)
{
	mm->pcpu_cid = alloc_percpu(struct mm_cid);
	if (!mm->pcpu_cid)
		return -ENOMEM;
	mm_init_cid(mm);
	return 0;
}

static inline void mm_destroy_cid(struct mm_struct *mm)
{
	free_percpu(mm->pcpu_cid);
	mm->pcpu_cid = NULL;
}

static inline unsigned int mm_cid_size(void)
{
	return cpumask_size();
}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm) { }
static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
static inline unsigned int mm_cid_size(void)
{
	return 0;
}
#endif /* CONFIG_SCHED_MM_CID */

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte; does not return a page
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_COMPLETED:		->fault completed, meanwhile mmap lock released
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
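
/*
 * Illustrative sketch: a hwpoisoned large page reports its hstate index in
 * bits 16-19 of the fault code:
 *
 *	vm_fault_t ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(2);
 *	unsigned int hindex = VM_FAULT_GET_HINDEX(ret);	// == 2
 */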

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
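
/*
 * Illustrative sketch: because the whole entry is a single word, it can be
 * stored anywhere an unsigned long fits, e.g. page->private of a swapcache
 * page (see the @private comment in struct page above):
 *
 *	swp_entry_t entry = { .val = page_private(page) };
 */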

/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
 *                      COW mapping, making sure that an exclusive anon page is
 *                      mapped after the fault.
 * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
 *                      We should only access orig_pte if this flag is set.
 * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
 * whether we would allow page faults to retry by specifying these two
 * fault flags correctly.  Currently there can be three legal combinations:
 *
 * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
 *                              this is the first try
 *
 * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
 *                              we've already tried at least once
 *
 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
 *
 * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
 * be used.  Note that page faults can be allowed to retry for multiple times,
 * in which case we'll have an initial fault with flags (a) then later on
 * continuous faults with flags (b).  We should always try to detect pending
 * signals before a retry to make sure the continuous page faults can still be
 * interrupted if necessary.
 *
 * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
 * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
 * applied to mappings that are not COW mappings.
 */
enum fault_flag {
	FAULT_FLAG_WRITE =		1 << 0,
	FAULT_FLAG_MKWRITE =		1 << 1,
	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
	FAULT_FLAG_KILLABLE =		1 << 4,
	FAULT_FLAG_TRIED =		1 << 5,
	FAULT_FLAG_USER =		1 << 6,
	FAULT_FLAG_REMOTE =		1 << 7,
	FAULT_FLAG_INSTRUCTION =	1 << 8,
	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
	FAULT_FLAG_UNSHARE =		1 << 10,
	FAULT_FLAG_ORIG_PTE_VALID =	1 << 11,
	FAULT_FLAG_VMA_LOCK =		1 << 12,
};
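
/*
 * Illustrative sketch: per combinations (a)-(c) above, a handler tells a
 * first attempt apart from a retry like so:
 *
 *	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 *		if (flags & FAULT_FLAG_TRIED)
 *			...;	// case (b): at least one prior attempt
 *		else
 *			...;	// case (a): first try, may return VM_FAULT_RETRY
 *	}
 */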

typedef unsigned int __bitwise zap_flags_t;

/*
 * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
 * other. Here is what they mean, and how to use them:
 *
 * FIXME: For pages which are part of a filesystem, mappings are subject to the
 * lifetime enforced by the filesystem and we need guarantees that longterm
 * users like RDMA and V4L2 only establish mappings which coordinate usage with
 * the filesystem.  Ideas for this coordination include revoking the longterm
 * pin, delaying writeback, bounce buffer page writeback, etc.  As FS DAX was
 * added after the problem with filesystems was found FS DAX VMAs are
 * specifically failed.  Filesystem pages are still subject to bugs and use of
 * FOLL_LONGTERM should be avoided on those pages.
 *
 * In the CMA case: long term pins in a CMA region would unnecessarily fragment
 * that region.  And so, CMA attempts to migrate the page before pinning, when
 * FOLL_LONGTERM is specified.
 *
 * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
 * but an additional pin counting system) will be invoked. This is intended for
 * anything that gets a page reference and then touches page data (for example,
 * Direct IO). This lets the filesystem know that some non-file-system entity is
 * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
 * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
 * a call to unpin_user_page().
 *
 * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
 * and separate refcounting mechanisms, however, and that means that each has
 * its own acquire and release mechanisms:
 *
 *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
 *
 *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
 *
 * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
 * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
 * calls applied to them, and that's perfectly OK. This is a constraint on the
 * callers, not on the pages.)
 *
 * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
 * directly by the caller. That's in order to help avoid mismatches when
 * releasing pages: get_user_pages*() pages must be released via put_page(),
 * while pin_user_pages*() pages must be released via unpin_user_page().
 *
 * Please see Documentation/core-api/pin_user_pages.rst for more information.
 */

enum {
	/* check pte is writable */
	FOLL_WRITE = 1 << 0,
	/* do get_page on page */
	FOLL_GET = 1 << 1,
	/* give error on hole if it would be zero */
	FOLL_DUMP = 1 << 2,
	/* get_user_pages read/write w/o permission */
	FOLL_FORCE = 1 << 3,
	/*
	 * if a disk transfer is needed, start the IO and return without waiting
	 * upon it
	 */
	FOLL_NOWAIT = 1 << 4,
	/* do not fault in pages */
	FOLL_NOFAULT = 1 << 5,
	/* check page is hwpoisoned */
	FOLL_HWPOISON = 1 << 6,
	/* don't do file mappings */
	FOLL_ANON = 1 << 7,
	/*
	 * FOLL_LONGTERM indicates that the page will be held for an indefinite
	 * time period _often_ under userspace control.  This is in contrast to
	 * iov_iter_get_pages(), whose usages are transient.
	 */
	FOLL_LONGTERM = 1 << 8,
	/* split huge pmd before returning */
	FOLL_SPLIT_PMD = 1 << 9,
	/* allow returning PCI P2PDMA pages */
	FOLL_PCI_P2PDMA = 1 << 10,
	/* allow interrupts from generic signals */
	FOLL_INTERRUPTIBLE = 1 << 11,

	/* See also internal only FOLL flags in mm/internal.h */
};
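
/*
 * Illustrative sketch (exact prototypes vary by kernel version): callers
 * never pass FOLL_PIN themselves; they choose the API family and release
 * pages with the matching primitive:
 *
 *	pin_user_pages(start, 1, FOLL_WRITE | FOLL_LONGTERM, &page);
 *	...
 *	unpin_user_page(page);
 */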

#endif /* _LINUX_MM_TYPES_H */