xref: /linux-6.15/include/linux/mm.h (revision 44c2cd80)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4 
5 #include <linux/errno.h>
6 
7 #ifdef __KERNEL__
8 
9 #include <linux/mmdebug.h>
10 #include <linux/gfp.h>
11 #include <linux/bug.h>
12 #include <linux/list.h>
13 #include <linux/mmzone.h>
14 #include <linux/rbtree.h>
15 #include <linux/atomic.h>
16 #include <linux/debug_locks.h>
17 #include <linux/mm_types.h>
18 #include <linux/mmap_lock.h>
19 #include <linux/range.h>
20 #include <linux/pfn.h>
21 #include <linux/percpu-refcount.h>
22 #include <linux/bit_spinlock.h>
23 #include <linux/shrinker.h>
24 #include <linux/resource.h>
25 #include <linux/page_ext.h>
26 #include <linux/err.h>
27 #include <linux/page-flags.h>
28 #include <linux/page_ref.h>
29 #include <linux/memremap.h>
30 #include <linux/overflow.h>
31 #include <linux/sizes.h>
32 #include <linux/sched.h>
33 #include <linux/pgtable.h>
34 #include <linux/kasan.h>
35 
36 struct mempolicy;
37 struct anon_vma;
38 struct anon_vma_chain;
39 struct file_ra_state;
40 struct user_struct;
41 struct writeback_control;
42 struct bdi_writeback;
43 struct pt_regs;
44 
45 extern int sysctl_page_lock_unfairness;
46 
47 void init_mm_internals(void);
48 
49 #ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
50 extern unsigned long max_mapnr;
51 
52 static inline void set_max_mapnr(unsigned long limit)
53 {
54 	max_mapnr = limit;
55 }
56 #else
57 static inline void set_max_mapnr(unsigned long limit) { }
58 #endif
59 
60 extern atomic_long_t _totalram_pages;
61 static inline unsigned long totalram_pages(void)
62 {
63 	return (unsigned long)atomic_long_read(&_totalram_pages);
64 }
65 
66 static inline void totalram_pages_inc(void)
67 {
68 	atomic_long_inc(&_totalram_pages);
69 }
70 
71 static inline void totalram_pages_dec(void)
72 {
73 	atomic_long_dec(&_totalram_pages);
74 }
75 
76 static inline void totalram_pages_add(long count)
77 {
78 	atomic_long_add(count, &_totalram_pages);
79 }
80 
81 extern void * high_memory;
82 extern int page_cluster;
83 
84 #ifdef CONFIG_SYSCTL
85 extern int sysctl_legacy_va_layout;
86 #else
87 #define sysctl_legacy_va_layout 0
88 #endif
89 
90 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
91 extern const int mmap_rnd_bits_min;
92 extern const int mmap_rnd_bits_max;
93 extern int mmap_rnd_bits __read_mostly;
94 #endif
95 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
96 extern const int mmap_rnd_compat_bits_min;
97 extern const int mmap_rnd_compat_bits_max;
98 extern int mmap_rnd_compat_bits __read_mostly;
99 #endif
100 
101 #include <asm/page.h>
102 #include <asm/processor.h>
103 
104 /*
105  * Architectures that support memory tagging (assigning tags to memory regions,
106  * embedding these tags into addresses that point to these memory regions, and
107  * checking that the memory and the pointer tags match on memory accesses)
108  * redefine this macro to strip tags from pointers.
109  * It's defined as a no-op for architectures that don't support memory tagging.
110  */
111 #ifndef untagged_addr
112 #define untagged_addr(addr) (addr)
113 #endif
114 
115 #ifndef __pa_symbol
116 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
117 #endif
118 
119 #ifndef page_to_virt
120 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
121 #endif
122 
123 #ifndef lm_alias
124 #define lm_alias(x)	__va(__pa_symbol(x))
125 #endif
126 
127 /*
128  * Used to prevent common memory management code from establishing
129  * a zero page mapping on a read fault.
130  * This macro should be defined within <asm/pgtable.h>.
131  * s390 does this to prevent multiplexing of hardware bits
132  * related to the physical page in case of virtualization.
133  */
134 #ifndef mm_forbids_zeropage
135 #define mm_forbids_zeropage(X)	(0)
136 #endif
137 
138 /*
139  * On some architectures it is expensive to call memset() for small sizes.
140  * If an architecture decides to implement its own version of
141  * mm_zero_struct_page, it should wrap the defines below in an #ifndef and
142  * define its own version of this macro in <asm/pgtable.h>
143  */
144 #if BITS_PER_LONG == 64
145 /* This function must be updated when the size of struct page grows above 80
146  * or shrinks below 56. The idea is that the compiler optimizes out the
147  * switch() statement and only leaves move/store instructions. The compiler
148  * can also combine write statements if they are both assignments and can be
149  * reordered, which can result in several of the writes here being dropped.
150  */
151 #define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
152 static inline void __mm_zero_struct_page(struct page *page)
153 {
154 	unsigned long *_pp = (void *)page;
155 
156 	 /* Check that struct page is either 56, 64, 72, or 80 bytes */
157 	BUILD_BUG_ON(sizeof(struct page) & 7);
158 	BUILD_BUG_ON(sizeof(struct page) < 56);
159 	BUILD_BUG_ON(sizeof(struct page) > 80);
160 
161 	switch (sizeof(struct page)) {
162 	case 80:
163 		_pp[9] = 0;
164 		fallthrough;
165 	case 72:
166 		_pp[8] = 0;
167 		fallthrough;
168 	case 64:
169 		_pp[7] = 0;
170 		fallthrough;
171 	case 56:
172 		_pp[6] = 0;
173 		_pp[5] = 0;
174 		_pp[4] = 0;
175 		_pp[3] = 0;
176 		_pp[2] = 0;
177 		_pp[1] = 0;
178 		_pp[0] = 0;
179 	}
180 }
181 #else
182 #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
183 #endif
184 
185 /*
186  * Default maximum number of active map areas; this limits the number of vmas
187  * per mm struct. Users can overwrite this number via sysctl, but there is a
188  * problem.
189  *
190  * When a program's coredump is generated in ELF format, a section is created
191  * per vma. In ELF, the number of sections is represented as an unsigned short,
192  * so the number of sections must stay below 65535 at coredump time. Because
193  * the kernel adds some informative sections to the program image when
194  * generating a coredump, we need some margin. The number of extra sections is
195  * currently 1-3, depending on the arch, so we use 5 as a safe margin here.
196  *
197  * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
198  * is no longer a hard limit, although some userspace tools can be surprised
199  * by that many sections.
200  */
201 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
202 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
203 
204 extern int sysctl_max_map_count;
205 
206 extern unsigned long sysctl_user_reserve_kbytes;
207 extern unsigned long sysctl_admin_reserve_kbytes;
208 
209 extern int sysctl_overcommit_memory;
210 extern int sysctl_overcommit_ratio;
211 extern unsigned long sysctl_overcommit_kbytes;
212 
213 int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
214 		loff_t *);
215 int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
216 		loff_t *);
217 int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
218 		loff_t *);
219 /*
220  * Any attempt to mark this function as static leads to a build failure
221  * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
222  * is referred to by BPF code. This must be visible for error injection.
223  */
224 int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
225 		pgoff_t index, gfp_t gfp, void **shadowp);
226 
227 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
228 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
229 #else
230 #define nth_page(page,n) ((page) + (n))
231 #endif
232 
233 /* to align the pointer to the (next) page boundary */
234 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
235 
236 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
237 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
238 
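/*
 * Illustrative sketch, not part of the original header: PAGE_ALIGN() rounds a
 * length or address up to the next page boundary, PAGE_ALIGNED() tests
 * alignment, and nth_page() steps through a possibly discontiguous memmap.
 * The helper name is hypothetical.
 */
static inline unsigned long example_round_up_to_page(unsigned long len)
{
	/* With 4 KiB pages: 1 -> 4096, 4096 -> 4096, 4097 -> 8192 */
	return PAGE_ALIGN(len);
}
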
239 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
240 
241 /*
242  * Linux kernel virtual memory manager primitives.
243  * The idea being to have a "virtual" mm in the same way
244  * we have a virtual fs - giving a cleaner interface to the
245  * mm details, and allowing different kinds of memory mappings
246  * (from shared memory to executable loading to arbitrary
247  * mmap() functions).
248  */
249 
250 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
251 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
252 void vm_area_free(struct vm_area_struct *);
253 
254 #ifndef CONFIG_MMU
255 extern struct rb_root nommu_region_tree;
256 extern struct rw_semaphore nommu_region_sem;
257 
258 extern unsigned int kobjsize(const void *objp);
259 #endif
260 
261 /*
262  * vm_flags in vm_area_struct, see mm_types.h.
263  * When changing, update also include/trace/events/mmflags.h
264  */
265 #define VM_NONE		0x00000000
266 
267 #define VM_READ		0x00000001	/* currently active flags */
268 #define VM_WRITE	0x00000002
269 #define VM_EXEC		0x00000004
270 #define VM_SHARED	0x00000008
271 
272 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
273 #define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
274 #define VM_MAYWRITE	0x00000020
275 #define VM_MAYEXEC	0x00000040
276 #define VM_MAYSHARE	0x00000080
277 
278 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
279 #define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
280 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
281 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
282 #define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
283 
284 #define VM_LOCKED	0x00002000
285 #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
286 
287 					/* Used by sys_madvise() */
288 #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
289 #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
290 
291 #define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
292 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
293 #define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
294 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
295 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
296 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
297 #define VM_SYNC		0x00800000	/* Synchronous page faults */
298 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
299 #define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
300 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
301 
302 #ifdef CONFIG_MEM_SOFT_DIRTY
303 # define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
304 #else
305 # define VM_SOFTDIRTY	0
306 #endif
307 
308 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
309 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
310 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
311 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
312 
313 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
314 #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
315 #define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
316 #define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
317 #define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
318 #define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
319 #define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
320 #define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
321 #define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
322 #define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
323 #define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
324 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
325 
326 #ifdef CONFIG_ARCH_HAS_PKEYS
327 # define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
328 # define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
329 # define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64   */
330 # define VM_PKEY_BIT2	VM_HIGH_ARCH_2
331 # define VM_PKEY_BIT3	VM_HIGH_ARCH_3
332 #ifdef CONFIG_PPC
333 # define VM_PKEY_BIT4  VM_HIGH_ARCH_4
334 #else
335 # define VM_PKEY_BIT4  0
336 #endif
337 #endif /* CONFIG_ARCH_HAS_PKEYS */
338 
339 #if defined(CONFIG_X86)
340 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
341 #elif defined(CONFIG_PPC)
342 # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
343 #elif defined(CONFIG_PARISC)
344 # define VM_GROWSUP	VM_ARCH_1
345 #elif defined(CONFIG_IA64)
346 # define VM_GROWSUP	VM_ARCH_1
347 #elif defined(CONFIG_SPARC64)
348 # define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
349 # define VM_ARCH_CLEAR	VM_SPARC_ADI
350 #elif defined(CONFIG_ARM64)
351 # define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
352 # define VM_ARCH_CLEAR	VM_ARM64_BTI
353 #elif !defined(CONFIG_MMU)
354 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
355 #endif
356 
357 #if defined(CONFIG_ARM64_MTE)
358 # define VM_MTE		VM_HIGH_ARCH_0	/* Use Tagged memory for access control */
359 # define VM_MTE_ALLOWED	VM_HIGH_ARCH_1	/* Tagged memory permitted */
360 #else
361 # define VM_MTE		VM_NONE
362 # define VM_MTE_ALLOWED	VM_NONE
363 #endif
364 
365 #ifndef VM_GROWSUP
366 # define VM_GROWSUP	VM_NONE
367 #endif
368 
369 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
370 # define VM_UFFD_MINOR_BIT	37
371 # define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
372 #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
373 # define VM_UFFD_MINOR		VM_NONE
374 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
375 
376 /* Bits set in the VMA until the stack is in its final location */
377 #define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
378 
379 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
380 
381 /* Common data flag combinations */
382 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
383 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
384 #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
385 				 VM_MAYWRITE | VM_MAYEXEC)
386 #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
387 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
388 
389 #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
390 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
391 #endif
392 
393 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
394 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
395 #endif
396 
397 #ifdef CONFIG_STACK_GROWSUP
398 #define VM_STACK	VM_GROWSUP
399 #else
400 #define VM_STACK	VM_GROWSDOWN
401 #endif
402 
403 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
404 
405 /* VMA basic access permission flags */
406 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
407 
408 
409 /*
410  * Special vmas that are non-mergable, non-mlock()able.
411  */
412 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
413 
414 /* This mask prevents a VMA from being scanned by khugepaged */
415 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
416 
417 /* This mask defines which mm->def_flags a process can inherit from its parent */
418 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
419 
420 /* This mask is used to clear all the VMA flags used by mlock */
421 #define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))
422 
423 /* Arch-specific flags to clear when updating VM flags on protection change */
424 #ifndef VM_ARCH_CLEAR
425 # define VM_ARCH_CLEAR	VM_NONE
426 #endif
427 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
428 
429 /*
430  * mapping from the currently active vm_flags protection bits (the
431  * low four bits) to a page protection mask.
432  */
433 extern pgprot_t protection_map[16];
434 
435 /**
436  * enum fault_flag - Fault flag definitions.
437  * @FAULT_FLAG_WRITE: Fault was a write fault.
438  * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
439  * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
440  * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
441  * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
442  * @FAULT_FLAG_TRIED: The fault has been tried once.
443  * @FAULT_FLAG_USER: The fault originated in userspace.
444  * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
445  * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
446  * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
447  *
448  * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
449  * whether we would allow page faults to retry by specifying these two
450  * fault flags correctly.  Currently there can be three legal combinations:
451  *
452  * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
453  *                              this is the first try
454  *
455  * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
456  *                              we've already tried at least once
457  *
458  * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
459  *
460  * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
461  * be used.  Note that page faults can be allowed to retry multiple times,
462  * in which case we'll have an initial fault with flags (a) then later on
463  * continuous faults with flags (b).  We should always try to detect pending
464  * signals before a retry to make sure the continuous page faults can still be
465  * interrupted if necessary.
466  */
467 enum fault_flag {
468 	FAULT_FLAG_WRITE =		1 << 0,
469 	FAULT_FLAG_MKWRITE =		1 << 1,
470 	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
471 	FAULT_FLAG_RETRY_NOWAIT = 	1 << 3,
472 	FAULT_FLAG_KILLABLE =		1 << 4,
473 	FAULT_FLAG_TRIED = 		1 << 5,
474 	FAULT_FLAG_USER =		1 << 6,
475 	FAULT_FLAG_REMOTE =		1 << 7,
476 	FAULT_FLAG_INSTRUCTION =	1 << 8,
477 	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
478 };
479 
480 /*
481  * The default fault flags that should be used by most of the
482  * arch-specific page fault handlers.
483  */
484 #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
485 			     FAULT_FLAG_KILLABLE | \
486 			     FAULT_FLAG_INTERRUPTIBLE)
487 
488 /**
489  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
490  * @flags: Fault flags.
491  *
492  * This is mostly used for places where we want to try to avoid taking
493  * the mmap_lock for too long a time when waiting for another condition
494  * to change, in which case we can try to be polite and release the
495  * mmap_lock in the first round to avoid potential starvation of other
496  * processes that would also want the mmap_lock.
497  *
498  * Return: true if the page fault allows retry and this is the first
499  * attempt of the fault handling; false otherwise.
500  */
501 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
502 {
503 	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
504 	    (!(flags & FAULT_FLAG_TRIED));
505 }
506 
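/*
 * Minimal sketch (assumption, mirroring how fault handlers commonly use the
 * helper above): decide whether it is polite to drop mmap_lock and request a
 * retry.  The helper name is hypothetical.
 */
static inline bool example_should_drop_mmap_lock(enum fault_flag flags)
{
	/*
	 * Back off only on the very first attempt of a retryable fault, and
	 * only if the caller is prepared to wait for the retry.
	 */
	return fault_flag_allow_retry_first(flags) &&
	       !(flags & FAULT_FLAG_RETRY_NOWAIT);
}
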
507 #define FAULT_FLAG_TRACE \
508 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
509 	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
510 	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
511 	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
512 	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
513 	{ FAULT_FLAG_TRIED,		"TRIED" }, \
514 	{ FAULT_FLAG_USER,		"USER" }, \
515 	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
516 	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
517 	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }
518 
519 /*
520  * vm_fault is filled by the pagefault handler and passed to the vma's
521  * ->fault function. The vma's ->fault is responsible for returning a bitmask
522  * of VM_FAULT_xxx flags that give details about how the fault was handled.
523  *
524  * MM layer fills up gfp_mask for page allocations but fault handler might
525  * alter it if its implementation requires a different allocation context.
526  *
527  * pgoff should be used in favour of virtual_address, if possible.
528  */
529 struct vm_fault {
530 	const struct {
531 		struct vm_area_struct *vma;	/* Target VMA */
532 		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
533 		pgoff_t pgoff;			/* Logical page offset based on vma */
534 		unsigned long address;		/* Faulting virtual address */
535 	};
536 	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
537 					 * XXX: should really be 'const' */
538 	pmd_t *pmd;			/* Pointer to pmd entry matching
539 					 * the 'address' */
540 	pud_t *pud;			/* Pointer to pud entry matching
541 					 * the 'address'
542 					 */
543 	union {
544 		pte_t orig_pte;		/* Value of PTE at the time of fault */
545 		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
546 					 * used by PMD fault only.
547 					 */
548 	};
549 
550 	struct page *cow_page;		/* Page handler may use for COW fault */
551 	struct page *page;		/* ->fault handlers should return a
552 					 * page here, unless VM_FAULT_NOPAGE
553 					 * is set (which is also implied by
554 					 * VM_FAULT_ERROR).
555 					 */
556 	/* These three entries are valid only while holding ptl lock */
557 	pte_t *pte;			/* Pointer to pte entry matching
558 					 * the 'address'. NULL if the page
559 					 * table hasn't been allocated.
560 					 */
561 	spinlock_t *ptl;		/* Page table lock.
562 					 * Protects pte page table if 'pte'
563 					 * is not NULL, otherwise pmd.
564 					 */
565 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
566 					 * vm_ops->map_pages() sets up a page
567 					 * table from atomic context.
568 					 * do_fault_around() pre-allocates
569 					 * page table to avoid allocation from
570 					 * atomic context.
571 					 */
572 };
573 
574 /* page entry size for vm->huge_fault() */
575 enum page_entry_size {
576 	PE_SIZE_PTE = 0,
577 	PE_SIZE_PMD,
578 	PE_SIZE_PUD,
579 };
580 
581 /*
582  * These are the virtual MM functions - opening of an area, closing and
583  * unmapping it (needed to keep files on disk up-to-date etc), pointer
584  * to the functions called when a no-page or a wp-page exception occurs.
585  */
586 struct vm_operations_struct {
587 	void (*open)(struct vm_area_struct * area);
588 	void (*close)(struct vm_area_struct * area);
589 	/* Called any time before splitting to check if it's allowed */
590 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
591 	int (*mremap)(struct vm_area_struct *area);
592 	/*
593 	 * Called by mprotect() to make driver-specific permission
594 	 * checks before mprotect() is finalised.   The VMA must not
595 	 * be modified.  Returns 0 if mprotect() can proceed.
596 	 */
597 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
598 			unsigned long end, unsigned long newflags);
599 	vm_fault_t (*fault)(struct vm_fault *vmf);
600 	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
601 			enum page_entry_size pe_size);
602 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
603 			pgoff_t start_pgoff, pgoff_t end_pgoff);
604 	unsigned long (*pagesize)(struct vm_area_struct * area);
605 
606 	/* notification that a previously read-only page is about to become
607 	 * writable; if an error is returned it will cause a SIGBUS */
608 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
609 
610 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
611 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
612 
613 	/* called by access_process_vm when get_user_pages() fails, typically
614 	 * for use by special VMAs. See also generic_access_phys() for a generic
615 	 * implementation useful for any iomem mapping.
616 	 */
617 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
618 		      void *buf, int len, int write);
619 
620 	/* Called by the /proc/PID/maps code to ask the vma whether it
621 	 * has a special name.  Returning non-NULL will also cause this
622 	 * vma to be dumped unconditionally. */
623 	const char *(*name)(struct vm_area_struct *vma);
624 
625 #ifdef CONFIG_NUMA
626 	/*
627 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
628 	 * to hold the policy upon return.  Caller should pass NULL @new to
629 	 * remove a policy and fall back to surrounding context--i.e. do not
630 	 * install a MPOL_DEFAULT policy, nor the task or system default
631 	 * mempolicy.
632 	 */
633 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
634 
635 	/*
636 	 * get_policy() op must add reference [mpol_get()] to any policy at
637 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
638 	 * in mm/mempolicy.c will do this automatically.
639 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
640 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
641 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
642 	 * must return NULL--i.e., do not "fallback" to task or system default
643 	 * policy.
644 	 */
645 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
646 					unsigned long addr);
647 #endif
648 	/*
649 	 * Called by vm_normal_page() for special PTEs to find the
650 	 * page for @addr.  This is useful if the default behavior
651 	 * (using pte_page()) would not find the correct page.
652 	 */
653 	struct page *(*find_special_page)(struct vm_area_struct *vma,
654 					  unsigned long addr);
655 };
656 
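/*
 * Minimal driver-style sketch (assumption): a trivial ->fault handler wired
 * into a vm_operations_struct.  A real handler would normally return a page
 * via vmf->page or map it directly; the names here are hypothetical.
 */
static vm_fault_t example_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;		/* no backing page: deliver SIGBUS */
}

static const struct vm_operations_struct example_vm_ops __maybe_unused = {
	.fault	= example_vm_fault,
};
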
657 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
658 {
659 	static const struct vm_operations_struct dummy_vm_ops = {};
660 
661 	memset(vma, 0, sizeof(*vma));
662 	vma->vm_mm = mm;
663 	vma->vm_ops = &dummy_vm_ops;
664 	INIT_LIST_HEAD(&vma->anon_vma_chain);
665 }
666 
667 static inline void vma_set_anonymous(struct vm_area_struct *vma)
668 {
669 	vma->vm_ops = NULL;
670 }
671 
672 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
673 {
674 	return !vma->vm_ops;
675 }
676 
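/*
 * Minimal sketch (assumption): allocate a VMA and mark it anonymous.
 * vm_area_alloc() is declared earlier in this header; example_alloc_anon_vma()
 * is hypothetical.
 */
static inline struct vm_area_struct *example_alloc_anon_vma(struct mm_struct *mm)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (!vma)
		return NULL;
	vma_set_anonymous(vma);		/* ->vm_ops == NULL marks it anonymous */
	return vma;
}
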
677 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
678 {
679 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
680 
681 	if (!maybe_stack)
682 		return false;
683 
684 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
685 						VM_STACK_INCOMPLETE_SETUP)
686 		return true;
687 
688 	return false;
689 }
690 
691 static inline bool vma_is_foreign(struct vm_area_struct *vma)
692 {
693 	if (!current->mm)
694 		return true;
695 
696 	if (current->mm != vma->vm_mm)
697 		return true;
698 
699 	return false;
700 }
701 
702 static inline bool vma_is_accessible(struct vm_area_struct *vma)
703 {
704 	return vma->vm_flags & VM_ACCESS_FLAGS;
705 }
706 
707 #ifdef CONFIG_SHMEM
708 /*
709  * vma_is_shmem() is not inline because it is used only by slow
710  * paths in userfault.
711  */
712 bool vma_is_shmem(struct vm_area_struct *vma);
713 #else
714 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
715 #endif
716 
717 int vma_is_stack_for_current(struct vm_area_struct *vma);
718 
719 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
720 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
721 
722 struct mmu_gather;
723 struct inode;
724 
725 #include <linux/huge_mm.h>
726 
727 /*
728  * Methods to modify the page usage count.
729  *
730  * What counts for a page usage:
731  * - cache mapping   (page->mapping)
732  * - private data    (page->private)
733  * - page mapped in a task's page tables, each mapping
734  *   is counted separately
735  *
736  * Also, many kernel routines increase the page count before a critical
737  * routine so they can be sure the page doesn't go away from under them.
738  */
739 
740 /*
741  * Drop a ref, return true if the refcount fell to zero (the page has no users)
742  */
743 static inline int put_page_testzero(struct page *page)
744 {
745 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
746 	return page_ref_dec_and_test(page);
747 }
748 
749 /*
750  * Try to grab a ref unless the page has a refcount of zero, return false if
751  * that is the case.
752  * This can be called when MMU is off so it must not access
753  * any of the virtual mappings.
754  */
755 static inline int get_page_unless_zero(struct page *page)
756 {
757 	return page_ref_add_unless(page, 1, 0);
758 }
759 
760 extern int page_is_ram(unsigned long pfn);
761 
762 enum {
763 	REGION_INTERSECTS,
764 	REGION_DISJOINT,
765 	REGION_MIXED,
766 };
767 
768 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
769 		      unsigned long desc);
770 
771 /* Support for virtually mapped pages */
772 struct page *vmalloc_to_page(const void *addr);
773 unsigned long vmalloc_to_pfn(const void *addr);
774 
775 /*
776  * Determine if an address is within the vmalloc range
777  *
778  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
779  * is no special casing required.
780  */
781 
782 #ifndef is_ioremap_addr
783 #define is_ioremap_addr(x) is_vmalloc_addr(x)
784 #endif
785 
786 #ifdef CONFIG_MMU
787 extern bool is_vmalloc_addr(const void *x);
788 extern int is_vmalloc_or_module_addr(const void *x);
789 #else
790 static inline bool is_vmalloc_addr(const void *x)
791 {
792 	return false;
793 }
794 static inline int is_vmalloc_or_module_addr(const void *x)
795 {
796 	return 0;
797 }
798 #endif
799 
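/*
 * Usage sketch (assumption): translate an address inside a vmalloc area into
 * its backing struct page.  Only meaningful for genuine vmalloc addresses;
 * the helper name is hypothetical.
 */
static inline struct page *example_vmalloc_backing_page(const void *addr)
{
	if (!is_vmalloc_addr(addr))
		return NULL;
	return vmalloc_to_page(addr);
}
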
800 extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
801 static inline void *kvmalloc(size_t size, gfp_t flags)
802 {
803 	return kvmalloc_node(size, flags, NUMA_NO_NODE);
804 }
805 static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
806 {
807 	return kvmalloc_node(size, flags | __GFP_ZERO, node);
808 }
809 static inline void *kvzalloc(size_t size, gfp_t flags)
810 {
811 	return kvmalloc(size, flags | __GFP_ZERO);
812 }
813 
814 static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
815 {
816 	size_t bytes;
817 
818 	if (unlikely(check_mul_overflow(n, size, &bytes)))
819 		return NULL;
820 
821 	return kvmalloc(bytes, flags);
822 }
823 
824 static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
825 {
826 	return kvmalloc_array(n, size, flags | __GFP_ZERO);
827 }
828 
829 extern void kvfree(const void *addr);
830 extern void kvfree_sensitive(const void *addr, size_t len);
831 
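/*
 * Usage sketch (assumption): kvcalloc()/kvmalloc_array() fall back from
 * kmalloc to vmalloc for large allocations, so the result must be freed with
 * kvfree(), never kfree().  struct example_entry and the helpers are
 * hypothetical.
 */
struct example_entry {
	unsigned long key;
	unsigned long value;
};

static inline struct example_entry *example_alloc_table(size_t n)
{
	return kvcalloc(n, sizeof(struct example_entry), GFP_KERNEL);
}

static inline void example_free_table(struct example_entry *table)
{
	kvfree(table);
}
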
832 static inline int head_compound_mapcount(struct page *head)
833 {
834 	return atomic_read(compound_mapcount_ptr(head)) + 1;
835 }
836 
837 /*
838  * Mapcount of a compound page as a whole; does not include mapped sub-pages.
839  *
840  * Must be called only for compound pages or any of their tail sub-pages.
841  */
842 static inline int compound_mapcount(struct page *page)
843 {
844 	VM_BUG_ON_PAGE(!PageCompound(page), page);
845 	page = compound_head(page);
846 	return head_compound_mapcount(page);
847 }
848 
849 /*
850  * The atomic page->_mapcount, starts from -1: so that transitions
851  * both from it and to it can be tracked, using atomic_inc_and_test
852  * and atomic_add_negative(-1).
853  */
854 static inline void page_mapcount_reset(struct page *page)
855 {
856 	atomic_set(&(page)->_mapcount, -1);
857 }
858 
859 int __page_mapcount(struct page *page);
860 
861 /*
862  * Mapcount of a 0-order page; when called on a compound sub-page, this
863  * includes compound_mapcount().
864  *
865  * The result is undefined for pages which cannot be mapped into userspace,
866  * for example SLAB pages or special types of pages; see page_has_type().
867  * Such pages use this place in struct page differently.
868  */
869 static inline int page_mapcount(struct page *page)
870 {
871 	if (unlikely(PageCompound(page)))
872 		return __page_mapcount(page);
873 	return atomic_read(&page->_mapcount) + 1;
874 }
875 
876 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
877 int total_mapcount(struct page *page);
878 int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
879 #else
880 static inline int total_mapcount(struct page *page)
881 {
882 	return page_mapcount(page);
883 }
884 static inline int page_trans_huge_mapcount(struct page *page,
885 					   int *total_mapcount)
886 {
887 	int mapcount = page_mapcount(page);
888 	if (total_mapcount)
889 		*total_mapcount = mapcount;
890 	return mapcount;
891 }
892 #endif
893 
894 static inline struct page *virt_to_head_page(const void *x)
895 {
896 	struct page *page = virt_to_page(x);
897 
898 	return compound_head(page);
899 }
900 
901 void __put_page(struct page *page);
902 
903 void put_pages_list(struct list_head *pages);
904 
905 void split_page(struct page *page, unsigned int order);
906 
907 /*
908  * Compound pages have a destructor function.  Provide a
909  * prototype for that function and accessor functions.
910  * These are _only_ valid on the head of a compound page.
911  */
912 typedef void compound_page_dtor(struct page *);
913 
914 /* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
915 enum compound_dtor_id {
916 	NULL_COMPOUND_DTOR,
917 	COMPOUND_PAGE_DTOR,
918 #ifdef CONFIG_HUGETLB_PAGE
919 	HUGETLB_PAGE_DTOR,
920 #endif
921 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
922 	TRANSHUGE_PAGE_DTOR,
923 #endif
924 	NR_COMPOUND_DTORS,
925 };
926 extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
927 
928 static inline void set_compound_page_dtor(struct page *page,
929 		enum compound_dtor_id compound_dtor)
930 {
931 	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
932 	page[1].compound_dtor = compound_dtor;
933 }
934 
935 static inline void destroy_compound_page(struct page *page)
936 {
937 	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
938 	compound_page_dtors[page[1].compound_dtor](page);
939 }
940 
941 static inline unsigned int compound_order(struct page *page)
942 {
943 	if (!PageHead(page))
944 		return 0;
945 	return page[1].compound_order;
946 }
947 
948 static inline bool hpage_pincount_available(struct page *page)
949 {
950 	/*
951 	 * Can the page->hpage_pinned_refcount field be used? That field is in
952 	 * the 3rd page of the compound page, so the smallest (2-page) compound
953 	 * pages cannot support it.
954 	 */
955 	page = compound_head(page);
956 	return PageCompound(page) && compound_order(page) > 1;
957 }
958 
959 static inline int head_compound_pincount(struct page *head)
960 {
961 	return atomic_read(compound_pincount_ptr(head));
962 }
963 
964 static inline int compound_pincount(struct page *page)
965 {
966 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
967 	page = compound_head(page);
968 	return head_compound_pincount(page);
969 }
970 
971 static inline void set_compound_order(struct page *page, unsigned int order)
972 {
973 	page[1].compound_order = order;
974 	page[1].compound_nr = 1U << order;
975 }
976 
977 /* Returns the number of pages in this potentially compound page. */
978 static inline unsigned long compound_nr(struct page *page)
979 {
980 	if (!PageHead(page))
981 		return 1;
982 	return page[1].compound_nr;
983 }
984 
985 /* Returns the number of bytes in this potentially compound page. */
986 static inline unsigned long page_size(struct page *page)
987 {
988 	return PAGE_SIZE << compound_order(page);
989 }
990 
991 /* Returns the number of bits needed for the number of bytes in a page */
992 static inline unsigned int page_shift(struct page *page)
993 {
994 	return PAGE_SHIFT + compound_order(page);
995 }
996 
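/*
 * Illustrative identities (assumption): for the head page of a compound
 * allocation, compound_nr() and page_size() are both derived from
 * compound_order().  The helper name is hypothetical.
 */
static inline void example_compound_sizes(struct page *head)
{
	unsigned int order = compound_order(head);

	/* e.g. a 2 MiB THP with 4 KiB pages: order 9, 512 pages, 2 MiB */
	WARN_ON(compound_nr(head) != 1UL << order);
	WARN_ON(page_size(head) != PAGE_SIZE << order);
}
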
997 void free_compound_page(struct page *page);
998 
999 #ifdef CONFIG_MMU
1000 /*
1001  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1002  * servicing faults for write access.  In the normal case, do always want
1003  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1004  * that do not have writing enabled, when used by access_process_vm.
1005  */
1006 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1007 {
1008 	if (likely(vma->vm_flags & VM_WRITE))
1009 		pte = pte_mkwrite(pte);
1010 	return pte;
1011 }
1012 
1013 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1014 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
1015 
1016 vm_fault_t finish_fault(struct vm_fault *vmf);
1017 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
1018 #endif
1019 
1020 /*
1021  * Multiple processes may "see" the same page. E.g. for untouched
1022  * mappings of /dev/null, all processes see the same page full of
1023  * zeroes, and text pages of executables and shared libraries have
1024  * only one copy in memory, at most, normally.
1025  *
1026  * For the non-reserved pages, page_count(page) denotes a reference count.
1027  *   page_count() == 0 means the page is free. page->lru is then used for
1028  *   freelist management in the buddy allocator.
1029  *   page_count() > 0  means the page has been allocated.
1030  *
1031  * Pages are allocated by the slab allocator in order to provide memory
1032  * to kmalloc and kmem_cache_alloc. In this case, the management of the
1033  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1034  * unless a particular usage is carefully commented. (the responsibility of
1035  * freeing the kmalloc memory is the caller's, of course).
1036  *
1037  * A page may be used by anyone else who does a __get_free_page().
1038  * In this case, page_count still tracks the references, and should only
1039  * be used through the normal accessor functions. The top bits of page->flags
1040  * and page->virtual store page management information, but all other fields
1041  * are unused and could be used privately, carefully. The management of this
1042  * page is the responsibility of the one who allocated it, and those who have
1043  * subsequently been given references to it.
1044  *
1045  * The other pages (we may call them "pagecache pages") are completely
1046  * managed by the Linux memory manager: I/O, buffers, swapping etc.
1047  * The following discussion applies only to them.
1048  *
1049  * A pagecache page contains an opaque `private' member, which belongs to the
1050  * page's address_space. Usually, this is the address of a circular list of
1051  * the page's disk buffers. PG_private must be set to tell the VM to call
1052  * into the filesystem to release these pages.
1053  *
1054  * A page may belong to an inode's memory mapping. In this case, page->mapping
1055  * is the pointer to the inode, and page->index is the file offset of the page,
1056  * in units of PAGE_SIZE.
1057  *
1058  * If pagecache pages are not associated with an inode, they are said to be
1059  * anonymous pages. These may become associated with the swapcache, and in that
1060  * case PG_swapcache is set, and page->private is an offset into the swapcache.
1061  *
1062  * In either case (swapcache or inode backed), the pagecache itself holds one
1063  * reference to the page. Setting PG_private should also increment the
1064  * refcount. Each user mapping also has a reference to the page.
1065  *
1066  * The pagecache pages are stored in a per-mapping radix tree, which is
1067  * rooted at mapping->i_pages, and indexed by offset.
1068  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
1069  * lists, we instead now tag pages as dirty/writeback in the radix tree.
1070  *
1071  * All pagecache pages may be subject to I/O:
1072  * - inode pages may need to be read from disk,
1073  * - inode pages which have been modified and are MAP_SHARED may need
1074  *   to be written back to the inode on disk,
1075  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
1076  *   modified may need to be swapped out to swap space and (later) to be read
1077  *   back into memory.
1078  */
1079 
1080 /*
1081  * The zone field is never updated after free_area_init_core()
1082  * sets it, so none of the operations on it need to be atomic.
1083  */
1084 
1085 /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
1086 #define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
1087 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
1088 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
1089 #define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
1090 #define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
1091 
1092 /*
1093  * Define the bit shifts to access each section.  For non-existent
1094  * sections we define the shift as 0; that plus a 0 mask ensures
1095  * the compiler will optimise away references to them.
1096  */
1097 #define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
1098 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
1099 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
1100 #define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
1101 #define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
1102 
1103 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
1104 #ifdef NODE_NOT_IN_PAGE_FLAGS
1105 #define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
1106 #define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
1107 						SECTIONS_PGOFF : ZONES_PGOFF)
1108 #else
1109 #define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
1110 #define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
1111 						NODES_PGOFF : ZONES_PGOFF)
1112 #endif
1113 
1114 #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
1115 
1116 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
1117 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
1118 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
1119 #define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
1120 #define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
1121 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
1122 
1123 static inline enum zone_type page_zonenum(const struct page *page)
1124 {
1125 	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
1126 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
1127 }
1128 
1129 #ifdef CONFIG_ZONE_DEVICE
1130 static inline bool is_zone_device_page(const struct page *page)
1131 {
1132 	return page_zonenum(page) == ZONE_DEVICE;
1133 }
1134 extern void memmap_init_zone_device(struct zone *, unsigned long,
1135 				    unsigned long, struct dev_pagemap *);
1136 #else
1137 static inline bool is_zone_device_page(const struct page *page)
1138 {
1139 	return false;
1140 }
1141 #endif
1142 
1143 static inline bool is_zone_movable_page(const struct page *page)
1144 {
1145 	return page_zonenum(page) == ZONE_MOVABLE;
1146 }
1147 
1148 #ifdef CONFIG_DEV_PAGEMAP_OPS
1149 void free_devmap_managed_page(struct page *page);
1150 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1151 
1152 static inline bool page_is_devmap_managed(struct page *page)
1153 {
1154 	if (!static_branch_unlikely(&devmap_managed_key))
1155 		return false;
1156 	if (!is_zone_device_page(page))
1157 		return false;
1158 	switch (page->pgmap->type) {
1159 	case MEMORY_DEVICE_PRIVATE:
1160 	case MEMORY_DEVICE_FS_DAX:
1161 		return true;
1162 	default:
1163 		break;
1164 	}
1165 	return false;
1166 }
1167 
1168 void put_devmap_managed_page(struct page *page);
1169 
1170 #else /* CONFIG_DEV_PAGEMAP_OPS */
1171 static inline bool page_is_devmap_managed(struct page *page)
1172 {
1173 	return false;
1174 }
1175 
1176 static inline void put_devmap_managed_page(struct page *page)
1177 {
1178 }
1179 #endif /* CONFIG_DEV_PAGEMAP_OPS */
1180 
1181 static inline bool is_device_private_page(const struct page *page)
1182 {
1183 	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
1184 		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
1185 		is_zone_device_page(page) &&
1186 		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
1187 }
1188 
1189 static inline bool is_pci_p2pdma_page(const struct page *page)
1190 {
1191 	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
1192 		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
1193 		is_zone_device_page(page) &&
1194 		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
1195 }
1196 
1197 /* 127: arbitrary random number, small enough to assemble well */
1198 #define page_ref_zero_or_close_to_overflow(page) \
1199 	((unsigned int) page_ref_count(page) + 127u <= 127u)
1200 
1201 static inline void get_page(struct page *page)
1202 {
1203 	page = compound_head(page);
1204 	/*
1205 	 * Getting a normal page or the head of a compound page
1206 	 * requires to already have an elevated page->_refcount.
1207 	 */
1208 	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
1209 	page_ref_inc(page);
1210 }
1211 
1212 bool __must_check try_grab_page(struct page *page, unsigned int flags);
1213 __maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
1214 						   unsigned int flags);
1215 
1216 
1217 static inline __must_check bool try_get_page(struct page *page)
1218 {
1219 	page = compound_head(page);
1220 	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1221 		return false;
1222 	page_ref_inc(page);
1223 	return true;
1224 }
1225 
1226 static inline void put_page(struct page *page)
1227 {
1228 	page = compound_head(page);
1229 
1230 	/*
1231 	 * For devmap managed pages we need to catch refcount transition from
1232 	 * 2 to 1: when the refcount reaches one it means the page is free and we
1233 	 * need to inform the device driver through a callback. See
1234 	 * include/linux/memremap.h and HMM for details.
1235 	 */
1236 	if (page_is_devmap_managed(page)) {
1237 		put_devmap_managed_page(page);
1238 		return;
1239 	}
1240 
1241 	if (put_page_testzero(page))
1242 		__put_page(page);
1243 }
1244 
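/*
 * Usage sketch (assumption): take a temporary reference on a page that might
 * be freed concurrently, inspect it, then drop the reference.  The helper
 * name is hypothetical.
 */
static inline bool example_peek_page(struct page *page)
{
	if (!get_page_unless_zero(page))
		return false;		/* the page was already being freed */

	/* ... it is now safe to examine the page ... */

	put_page(page);
	return true;
}
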
1245 /*
1246  * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1247  * the page's refcount so that two separate items are tracked: the original page
1248  * reference count, and also a new count of how many pin_user_pages() calls were
1249  * made against the page. ("gup-pinned" is another term for the latter).
1250  *
1251  * With this scheme, pin_user_pages() becomes special: such pages are marked as
1252  * distinct from normal pages. As such, the unpin_user_page() call (and its
1253  * variants) must be used in order to release gup-pinned pages.
1254  *
1255  * Choice of value:
1256  *
1257  * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1258  * counts with respect to pin_user_pages() and unpin_user_page() becomes
1259  * simpler, due to the fact that adding an even power of two to the page
1260  * refcount has the effect of using only the upper N bits, for the code that
1261  * counts up using the bias value. This means that the lower bits are left for
1262  * the exclusive use of the original code that increments and decrements by one
1263  * (or at least, by much smaller values than the bias value).
1264  *
1265  * Of course, once the lower bits overflow into the upper bits (and this is
1266  * OK, because subtraction recovers the original values), then visual inspection
1267  * no longer suffices to directly view the separate counts. However, for normal
1268  * applications that don't have huge page reference counts, this won't be an
1269  * issue.
1270  *
1271  * Locking: the lockless algorithm described in page_cache_get_speculative()
1272  * and page_cache_gup_pin_speculative() provides safe operation for
1273  * get_user_pages and page_mkclean and other calls that race to set up page
1274  * table entries.
1275  */
1276 #define GUP_PIN_COUNTING_BIAS (1U << 10)
1277 
1278 void unpin_user_page(struct page *page);
1279 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1280 				 bool make_dirty);
1281 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1282 				      bool make_dirty);
1283 void unpin_user_pages(struct page **pages, unsigned long npages);
1284 
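/*
 * Sketch of the intended pairing (assumption; pin_user_pages_fast() and
 * FOLL_WRITE are declared further down in this header): pages pinned for DMA
 * must be released with the unpin_user_page*() family, never with plain
 * put_page().  The helper name is hypothetical.
 */
static inline int example_pin_for_dma(unsigned long start, int nr_pages,
				      struct page **pages)
{
	int pinned;

	/* FOLL_WRITE: the device is expected to write into these pages */
	pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned;

	/* ... hand the pinned pages to the DMA engine and wait ... */

	/* Drop the pins and mark the pages dirty in one call */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}
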
1285 /**
1286  * page_maybe_dma_pinned - Report if a page is pinned for DMA.
1287  * @page: The page.
1288  *
1289  * This function checks if a page has been pinned via a call to
1290  * a function in the pin_user_pages() family.
1291  *
1292  * For non-huge pages, the return value is partially fuzzy: false is not fuzzy,
1293  * because it means "definitely not pinned for DMA", but true means "probably
1294  * pinned for DMA, but possibly a false positive due to having at least
1295  * GUP_PIN_COUNTING_BIAS worth of normal page references".
1296  *
1297  * False positives are OK, because: a) it's unlikely for a page to get that many
1298  * refcounts, and b) all the callers of this routine are expected to be able to
1299  * deal gracefully with a false positive.
1300  *
1301  * For huge pages, the result will be exactly correct. That's because we have
1302  * more tracking data available: the 3rd struct page in the compound page is
1303  * used to track the pincount (instead of using the GUP_PIN_COUNTING_BIAS
1304  * scheme).
1305  *
1306  * For more information, please see Documentation/core-api/pin_user_pages.rst.
1307  *
1308  * Return: True, if it is likely that the page has been "dma-pinned".
1309  * False, if the page is definitely not dma-pinned.
1310  */
1311 static inline bool page_maybe_dma_pinned(struct page *page)
1312 {
1313 	if (hpage_pincount_available(page))
1314 		return compound_pincount(page) > 0;
1315 
1316 	/*
1317 	 * page_ref_count() is signed. If that refcount overflows, then
1318 	 * page_ref_count() returns a negative value, and callers will avoid
1319 	 * further incrementing the refcount.
1320 	 *
1321 	 * Here, for that overflow case, use the signed bit to count a little
1322 	 * bit higher via unsigned math, and thus still get an accurate result.
1323 	 */
1324 	return ((unsigned int)page_ref_count(compound_head(page))) >=
1325 		GUP_PIN_COUNTING_BIAS;
1326 }
1327 
1328 static inline bool is_cow_mapping(vm_flags_t flags)
1329 {
1330 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1331 }
1332 
1333 /*
1334  * This should most likely only be called during fork() to see whether we
1335  * should break the cow immediately for a page on the src mm.
1336  */
1337 static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
1338 					  struct page *page)
1339 {
1340 	if (!is_cow_mapping(vma->vm_flags))
1341 		return false;
1342 
1343 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1344 		return false;
1345 
1346 	return page_maybe_dma_pinned(page);
1347 }
1348 
1349 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1350 #define SECTION_IN_PAGE_FLAGS
1351 #endif
1352 
1353 /*
1354  * The identification function is mainly used by the buddy allocator for
1355  * determining if two pages could be buddies. We are not really identifying
1356  * the zone since we could be using the section number id if we do not have
1357  * node id available in page flags.
1358  * We only guarantee that it will return the same value for two combinable
1359  * pages in a zone.
1360  */
1361 static inline int page_zone_id(struct page *page)
1362 {
1363 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1364 }
1365 
1366 #ifdef NODE_NOT_IN_PAGE_FLAGS
1367 extern int page_to_nid(const struct page *page);
1368 #else
1369 static inline int page_to_nid(const struct page *page)
1370 {
1371 	struct page *p = (struct page *)page;
1372 
1373 	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
1374 }
1375 #endif
1376 
1377 #ifdef CONFIG_NUMA_BALANCING
1378 static inline int cpu_pid_to_cpupid(int cpu, int pid)
1379 {
1380 	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1381 }
1382 
1383 static inline int cpupid_to_pid(int cpupid)
1384 {
1385 	return cpupid & LAST__PID_MASK;
1386 }
1387 
1388 static inline int cpupid_to_cpu(int cpupid)
1389 {
1390 	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1391 }
1392 
1393 static inline int cpupid_to_nid(int cpupid)
1394 {
1395 	return cpu_to_node(cpupid_to_cpu(cpupid));
1396 }
1397 
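/*
 * Round-trip sketch (assumption): a cpupid packs the last CPU and PID that
 * accessed a page; the accessors above recover the fields, truncated to
 * LAST__CPU_MASK / LAST__PID_MASK.  The helper name is hypothetical.
 */
static inline void example_cpupid_roundtrip(int cpu, int pid)
{
	int cpupid = cpu_pid_to_cpupid(cpu, pid);

	WARN_ON(cpupid_to_cpu(cpupid) != (cpu & LAST__CPU_MASK));
	WARN_ON(cpupid_to_pid(cpupid) != (pid & LAST__PID_MASK));
}
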
1398 static inline bool cpupid_pid_unset(int cpupid)
1399 {
1400 	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1401 }
1402 
1403 static inline bool cpupid_cpu_unset(int cpupid)
1404 {
1405 	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1406 }
1407 
1408 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1409 {
1410 	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1411 }
1412 
1413 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1414 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1415 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1416 {
1417 	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1418 }
1419 
1420 static inline int page_cpupid_last(struct page *page)
1421 {
1422 	return page->_last_cpupid;
1423 }
1424 static inline void page_cpupid_reset_last(struct page *page)
1425 {
1426 	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1427 }
1428 #else
1429 static inline int page_cpupid_last(struct page *page)
1430 {
1431 	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1432 }
1433 
1434 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
1435 
1436 static inline void page_cpupid_reset_last(struct page *page)
1437 {
1438 	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1439 }
1440 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1441 #else /* !CONFIG_NUMA_BALANCING */
1442 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1443 {
1444 	return page_to_nid(page); /* XXX */
1445 }
1446 
1447 static inline int page_cpupid_last(struct page *page)
1448 {
1449 	return page_to_nid(page); /* XXX */
1450 }
1451 
1452 static inline int cpupid_to_nid(int cpupid)
1453 {
1454 	return -1;
1455 }
1456 
1457 static inline int cpupid_to_pid(int cpupid)
1458 {
1459 	return -1;
1460 }
1461 
1462 static inline int cpupid_to_cpu(int cpupid)
1463 {
1464 	return -1;
1465 }
1466 
1467 static inline int cpu_pid_to_cpupid(int nid, int pid)
1468 {
1469 	return -1;
1470 }
1471 
1472 static inline bool cpupid_pid_unset(int cpupid)
1473 {
1474 	return true;
1475 }
1476 
1477 static inline void page_cpupid_reset_last(struct page *page)
1478 {
1479 }
1480 
1481 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1482 {
1483 	return false;
1484 }
1485 #endif /* CONFIG_NUMA_BALANCING */
1486 
1487 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1488 
1489 /*
1490  * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to set
1491  * the tag for every page to the native kernel tag value 0xff, as the default
1492  * value 0x00 then maps to 0xff.
1493  */
1494 
1495 static inline u8 page_kasan_tag(const struct page *page)
1496 {
1497 	u8 tag = 0xff;
1498 
1499 	if (kasan_enabled()) {
1500 		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1501 		tag ^= 0xff;
1502 	}
1503 
1504 	return tag;
1505 }
1506 
1507 static inline void page_kasan_tag_set(struct page *page, u8 tag)
1508 {
1509 	if (kasan_enabled()) {
1510 		tag ^= 0xff;
1511 		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1512 		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1513 	}
1514 }
1515 
1516 static inline void page_kasan_tag_reset(struct page *page)
1517 {
1518 	if (kasan_enabled())
1519 		page_kasan_tag_set(page, 0xff);
1520 }
1521 
1522 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1523 
1524 static inline u8 page_kasan_tag(const struct page *page)
1525 {
1526 	return 0xff;
1527 }
1528 
1529 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1530 static inline void page_kasan_tag_reset(struct page *page) { }
1531 
1532 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1533 
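/*
 * Round-trip sketch (assumption): with one of the KASAN tag-based modes
 * enabled, the tag stored in page->flags is xor'ed with 0xff, so set and get
 * are inverses; otherwise both calls degrade to the 0xff "match-all" tag.
 * The helper name is hypothetical.
 */
static inline void example_kasan_tag_roundtrip(struct page *page, u8 tag)
{
	page_kasan_tag_set(page, tag);
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		WARN_ON(kasan_enabled() && page_kasan_tag(page) != tag);
}
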
1534 static inline struct zone *page_zone(const struct page *page)
1535 {
1536 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1537 }
1538 
1539 static inline pg_data_t *page_pgdat(const struct page *page)
1540 {
1541 	return NODE_DATA(page_to_nid(page));
1542 }
1543 
1544 #ifdef SECTION_IN_PAGE_FLAGS
1545 static inline void set_page_section(struct page *page, unsigned long section)
1546 {
1547 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1548 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1549 }
1550 
1551 static inline unsigned long page_to_section(const struct page *page)
1552 {
1553 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1554 }
1555 #endif
1556 
1557 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
1558 #ifdef CONFIG_MIGRATION
1559 static inline bool is_pinnable_page(struct page *page)
1560 {
1561 	return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
1562 		is_zero_pfn(page_to_pfn(page));
1563 }
1564 #else
1565 static inline bool is_pinnable_page(struct page *page)
1566 {
1567 	return true;
1568 }
1569 #endif
1570 
1571 static inline void set_page_zone(struct page *page, enum zone_type zone)
1572 {
1573 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1574 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1575 }
1576 
1577 static inline void set_page_node(struct page *page, unsigned long node)
1578 {
1579 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1580 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1581 }
1582 
1583 static inline void set_page_links(struct page *page, enum zone_type zone,
1584 	unsigned long node, unsigned long pfn)
1585 {
1586 	set_page_zone(page, zone);
1587 	set_page_node(page, node);
1588 #ifdef SECTION_IN_PAGE_FLAGS
1589 	set_page_section(page, pfn_to_section_nr(pfn));
1590 #endif
1591 }
1592 
1593 /*
1594  * Some inline functions in vmstat.h depend on page_zone()
1595  */
1596 #include <linux/vmstat.h>
1597 
1598 static __always_inline void *lowmem_page_address(const struct page *page)
1599 {
1600 	return page_to_virt(page);
1601 }
1602 
1603 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1604 #define HASHED_PAGE_VIRTUAL
1605 #endif
1606 
1607 #if defined(WANT_PAGE_VIRTUAL)
1608 static inline void *page_address(const struct page *page)
1609 {
1610 	return page->virtual;
1611 }
1612 static inline void set_page_address(struct page *page, void *address)
1613 {
1614 	page->virtual = address;
1615 }
1616 #define page_address_init()  do { } while(0)
1617 #endif
1618 
1619 #if defined(HASHED_PAGE_VIRTUAL)
1620 void *page_address(const struct page *page);
1621 void set_page_address(struct page *page, void *virtual);
1622 void page_address_init(void);
1623 #endif
1624 
1625 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1626 #define page_address(page) lowmem_page_address(page)
1627 #define set_page_address(page, address)  do { } while(0)
1628 #define page_address_init()  do { } while(0)
1629 #endif
1630 
1631 extern void *page_rmapping(struct page *page);
1632 extern struct anon_vma *page_anon_vma(struct page *page);
1633 extern struct address_space *page_mapping(struct page *page);
1634 
1635 extern struct address_space *__page_file_mapping(struct page *);
1636 
1637 static inline
1638 struct address_space *page_file_mapping(struct page *page)
1639 {
1640 	if (unlikely(PageSwapCache(page)))
1641 		return __page_file_mapping(page);
1642 
1643 	return page->mapping;
1644 }
1645 
1646 extern pgoff_t __page_file_index(struct page *page);
1647 
1648 /*
1649  * Return the pagecache index of the passed page.  Regular pagecache pages
1650  * use ->index whereas swapcache pages use swp_offset(->private)
1651  */
1652 static inline pgoff_t page_index(struct page *page)
1653 {
1654 	if (unlikely(PageSwapCache(page)))
1655 		return __page_file_index(page);
1656 	return page->index;
1657 }
1658 
1659 bool page_mapped(struct page *page);
1660 struct address_space *page_mapping(struct page *page);
1661 
1662 /*
1663  * Return true only if the page has been allocated with
1664  * ALLOC_NO_WATERMARKS and the low watermark was not
1665  * met, implying that the system is under some pressure.
1666  */
1667 static inline bool page_is_pfmemalloc(const struct page *page)
1668 {
1669 	/*
1670 	 * lru.next has bit 1 set if the page is allocated from the
1671 	 * pfmemalloc reserves.  Callers may simply overwrite it if
1672 	 * they do not need to preserve that information.
1673 	 */
1674 	return (uintptr_t)page->lru.next & BIT(1);
1675 }
1676 
1677 /*
1678  * Only to be called by the page allocator on a freshly allocated
1679  * page.
1680  */
1681 static inline void set_page_pfmemalloc(struct page *page)
1682 {
1683 	page->lru.next = (void *)BIT(1);
1684 }
1685 
1686 static inline void clear_page_pfmemalloc(struct page *page)
1687 {
1688 	page->lru.next = NULL;
1689 }
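
/*
 * Usage sketch (illustrative only, error handling elided): consumers check
 * the flag on pages handed to them before reusing those pages for ordinary
 * purposes, in the spirit of helpers such as skb_propagate_pfmemalloc().
 * restrict_page_use() below is a hypothetical helper:
 *
 *	page = alloc_page(GFP_ATOMIC | __GFP_MEMALLOC);
 *	if (page && page_is_pfmemalloc(page))
 *		restrict_page_use(page);	// came from emergency reserves
 */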
1690 
1691 /*
1692  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1693  */
1694 extern void pagefault_out_of_memory(void);
1695 
1696 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
1697 #define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
1698 
1699 /*
1700  * Flags passed to show_mem() and show_free_areas() to suppress output in
1701  * various contexts.
1702  */
1703 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
1704 
1705 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1706 
1707 #ifdef CONFIG_MMU
1708 extern bool can_do_mlock(void);
1709 #else
1710 static inline bool can_do_mlock(void) { return false; }
1711 #endif
1712 extern int user_shm_lock(size_t, struct ucounts *);
1713 extern void user_shm_unlock(size_t, struct ucounts *);
1714 
1715 /*
1716  * Parameter block passed down to zap_pte_range in exceptional cases.
1717  */
1718 struct zap_details {
1719 	struct address_space *check_mapping;	/* Check page->mapping if set */
1720 	pgoff_t	first_index;			/* Lowest page->index to unmap */
1721 	pgoff_t last_index;			/* Highest page->index to unmap */
1722 	struct page *single_page;		/* Locked page to be unmapped */
1723 };
1724 
1725 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1726 			     pte_t pte);
1727 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1728 				pmd_t pmd);
1729 
1730 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1731 		  unsigned long size);
1732 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1733 		    unsigned long size);
1734 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1735 		unsigned long start, unsigned long end);
1736 
1737 struct mmu_notifier_range;
1738 
1739 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1740 		unsigned long end, unsigned long floor, unsigned long ceiling);
1741 int
1742 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
1743 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
1744 			  struct mmu_notifier_range *range, pte_t **ptepp,
1745 			  pmd_t **pmdpp, spinlock_t **ptlp);
1746 int follow_pte(struct mm_struct *mm, unsigned long address,
1747 	       pte_t **ptepp, spinlock_t **ptlp);
1748 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1749 	unsigned long *pfn);
1750 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1751 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
1752 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1753 			void *buf, int len, int write);
1754 
1755 extern void truncate_pagecache(struct inode *inode, loff_t new);
1756 extern void truncate_setsize(struct inode *inode, loff_t newsize);
1757 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1758 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1759 int truncate_inode_page(struct address_space *mapping, struct page *page);
1760 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1761 int invalidate_inode_page(struct page *page);
1762 
1763 #ifdef CONFIG_MMU
1764 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1765 				  unsigned long address, unsigned int flags,
1766 				  struct pt_regs *regs);
1767 extern int fixup_user_fault(struct mm_struct *mm,
1768 			    unsigned long address, unsigned int fault_flags,
1769 			    bool *unlocked);
1770 void unmap_mapping_page(struct page *page);
1771 void unmap_mapping_pages(struct address_space *mapping,
1772 		pgoff_t start, pgoff_t nr, bool even_cows);
1773 void unmap_mapping_range(struct address_space *mapping,
1774 		loff_t const holebegin, loff_t const holelen, int even_cows);
1775 #else
1776 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1777 					 unsigned long address, unsigned int flags,
1778 					 struct pt_regs *regs)
1779 {
1780 	/* should never happen if there's no MMU */
1781 	BUG();
1782 	return VM_FAULT_SIGBUS;
1783 }
1784 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
1785 		unsigned int fault_flags, bool *unlocked)
1786 {
1787 	/* should never happen if there's no MMU */
1788 	BUG();
1789 	return -EFAULT;
1790 }
1791 static inline void unmap_mapping_page(struct page *page) { }
1792 static inline void unmap_mapping_pages(struct address_space *mapping,
1793 		pgoff_t start, pgoff_t nr, bool even_cows) { }
1794 static inline void unmap_mapping_range(struct address_space *mapping,
1795 		loff_t const holebegin, loff_t const holelen, int even_cows) { }
1796 #endif
1797 
1798 static inline void unmap_shared_mapping_range(struct address_space *mapping,
1799 		loff_t const holebegin, loff_t const holelen)
1800 {
1801 	unmap_mapping_range(mapping, holebegin, holelen, 0);
1802 }
1803 
1804 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
1805 		void *buf, int len, unsigned int gup_flags);
1806 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1807 		void *buf, int len, unsigned int gup_flags);
1808 extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1809 			      void *buf, int len, unsigned int gup_flags);
1810 
1811 long get_user_pages_remote(struct mm_struct *mm,
1812 			    unsigned long start, unsigned long nr_pages,
1813 			    unsigned int gup_flags, struct page **pages,
1814 			    struct vm_area_struct **vmas, int *locked);
1815 long pin_user_pages_remote(struct mm_struct *mm,
1816 			   unsigned long start, unsigned long nr_pages,
1817 			   unsigned int gup_flags, struct page **pages,
1818 			   struct vm_area_struct **vmas, int *locked);
1819 long get_user_pages(unsigned long start, unsigned long nr_pages,
1820 			    unsigned int gup_flags, struct page **pages,
1821 			    struct vm_area_struct **vmas);
1822 long pin_user_pages(unsigned long start, unsigned long nr_pages,
1823 		    unsigned int gup_flags, struct page **pages,
1824 		    struct vm_area_struct **vmas);
1825 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1826 		    unsigned int gup_flags, struct page **pages, int *locked);
1827 long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
1828 		    unsigned int gup_flags, struct page **pages, int *locked);
1829 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1830 		    struct page **pages, unsigned int gup_flags);
1831 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1832 		    struct page **pages, unsigned int gup_flags);
1833 
1834 int get_user_pages_fast(unsigned long start, int nr_pages,
1835 			unsigned int gup_flags, struct page **pages);
1836 int pin_user_pages_fast(unsigned long start, int nr_pages,
1837 			unsigned int gup_flags, struct page **pages);
1838 
1839 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
1840 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
1841 			struct task_struct *task, bool bypass_rlim);
1842 
1843 struct kvec;
1844 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1845 			struct page **pages);
1846 int get_kernel_page(unsigned long start, int write, struct page **pages);
1847 struct page *get_dump_page(unsigned long addr);
1848 
1849 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1850 extern void do_invalidatepage(struct page *page, unsigned int offset,
1851 			      unsigned int length);
1852 
1853 int redirty_page_for_writepage(struct writeback_control *wbc,
1854 				struct page *page);
1855 void account_page_cleaned(struct page *page, struct address_space *mapping,
1856 			  struct bdi_writeback *wb);
1857 int set_page_dirty(struct page *page);
1858 int set_page_dirty_lock(struct page *page);
1859 void __cancel_dirty_page(struct page *page);
1860 static inline void cancel_dirty_page(struct page *page)
1861 {
1862 	/* Avoid atomic ops, locking, etc. when not actually needed. */
1863 	if (PageDirty(page))
1864 		__cancel_dirty_page(page);
1865 }
1866 int clear_page_dirty_for_io(struct page *page);
1867 
1868 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1869 
1870 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1871 		unsigned long old_addr, struct vm_area_struct *new_vma,
1872 		unsigned long new_addr, unsigned long len,
1873 		bool need_rmap_locks);
1874 
1875 /*
1876  * Flags used by change_protection().  For now we make it a bitmap so
1877  * that we can pass in multiple flags just like parameters.  However
1878  * for now all the callers are only use one of the flags at the same
1879  * time.
1880  */
1881 /* Whether we should allow dirty bit accounting */
1882 #define  MM_CP_DIRTY_ACCT                  (1UL << 0)
1883 /* Whether this protection change is for NUMA hints */
1884 #define  MM_CP_PROT_NUMA                   (1UL << 1)
1885 /* Whether this change is for write protecting */
1886 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
1887 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
1888 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
1889 					    MM_CP_UFFD_WP_RESOLVE)
1890 
1891 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1892 			      unsigned long end, pgprot_t newprot,
1893 			      unsigned long cp_flags);
1894 extern int mprotect_fixup(struct vm_area_struct *vma,
1895 			  struct vm_area_struct **pprev, unsigned long start,
1896 			  unsigned long end, unsigned long newflags);
1897 
1898 /*
1899  * These don't attempt to fault in pages and may return fewer pages than requested ("short").
1900  */
1901 int get_user_pages_fast_only(unsigned long start, int nr_pages,
1902 			     unsigned int gup_flags, struct page **pages);
1903 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
1904 			     unsigned int gup_flags, struct page **pages);
1905 
1906 static inline bool get_user_page_fast_only(unsigned long addr,
1907 			unsigned int gup_flags, struct page **pagep)
1908 {
1909 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
1910 }
1911 /*
1912  * per-process (per-mm_struct) statistics.
1913  */
1914 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1915 {
1916 	long val = atomic_long_read(&mm->rss_stat.count[member]);
1917 
1918 #ifdef SPLIT_RSS_COUNTING
1919 	/*
1920 	 * The counter is updated asynchronously and may temporarily go negative.
1921 	 * A negative value is never what users expect, so clamp it to zero.
1922 	 */
1923 	if (val < 0)
1924 		val = 0;
1925 #endif
1926 	return (unsigned long)val;
1927 }
1928 
1929 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
1930 
1931 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1932 {
1933 	long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
1934 
1935 	mm_trace_rss_stat(mm, member, count);
1936 }
1937 
1938 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1939 {
1940 	long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
1941 
1942 	mm_trace_rss_stat(mm, member, count);
1943 }
1944 
1945 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1946 {
1947 	long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
1948 
1949 	mm_trace_rss_stat(mm, member, count);
1950 }
1951 
1952 /* Optimized variant when page is already known not to be PageAnon */
1953 static inline int mm_counter_file(struct page *page)
1954 {
1955 	if (PageSwapBacked(page))
1956 		return MM_SHMEMPAGES;
1957 	return MM_FILEPAGES;
1958 }
1959 
1960 static inline int mm_counter(struct page *page)
1961 {
1962 	if (PageAnon(page))
1963 		return MM_ANONPAGES;
1964 	return mm_counter_file(page);
1965 }
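
/*
 * Usage sketch: fault and unmap paths pick the right RSS bucket via these
 * helpers instead of hard-coding a counter, e.g.
 *
 *	inc_mm_counter(vma->vm_mm, mm_counter(page));	// page becomes mapped
 *	...
 *	dec_mm_counter(vma->vm_mm, mm_counter(page));	// page gets unmapped
 */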
1966 
1967 static inline unsigned long get_mm_rss(struct mm_struct *mm)
1968 {
1969 	return get_mm_counter(mm, MM_FILEPAGES) +
1970 		get_mm_counter(mm, MM_ANONPAGES) +
1971 		get_mm_counter(mm, MM_SHMEMPAGES);
1972 }
1973 
1974 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1975 {
1976 	return max(mm->hiwater_rss, get_mm_rss(mm));
1977 }
1978 
1979 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1980 {
1981 	return max(mm->hiwater_vm, mm->total_vm);
1982 }
1983 
1984 static inline void update_hiwater_rss(struct mm_struct *mm)
1985 {
1986 	unsigned long _rss = get_mm_rss(mm);
1987 
1988 	if ((mm)->hiwater_rss < _rss)
1989 		(mm)->hiwater_rss = _rss;
1990 }
1991 
1992 static inline void update_hiwater_vm(struct mm_struct *mm)
1993 {
1994 	if (mm->hiwater_vm < mm->total_vm)
1995 		mm->hiwater_vm = mm->total_vm;
1996 }
1997 
1998 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1999 {
2000 	mm->hiwater_rss = get_mm_rss(mm);
2001 }
2002 
2003 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2004 					 struct mm_struct *mm)
2005 {
2006 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2007 
2008 	if (*maxrss < hiwater_rss)
2009 		*maxrss = hiwater_rss;
2010 }
2011 
2012 #if defined(SPLIT_RSS_COUNTING)
2013 void sync_mm_rss(struct mm_struct *mm);
2014 #else
2015 static inline void sync_mm_rss(struct mm_struct *mm)
2016 {
2017 }
2018 #endif
2019 
2020 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2021 static inline int pte_special(pte_t pte)
2022 {
2023 	return 0;
2024 }
2025 
2026 static inline pte_t pte_mkspecial(pte_t pte)
2027 {
2028 	return pte;
2029 }
2030 #endif
2031 
2032 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2033 static inline int pte_devmap(pte_t pte)
2034 {
2035 	return 0;
2036 }
2037 #endif
2038 
2039 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2040 
2041 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2042 			       spinlock_t **ptl);
2043 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2044 				    spinlock_t **ptl)
2045 {
2046 	pte_t *ptep;
2047 	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2048 	return ptep;
2049 }
2050 
2051 #ifdef __PAGETABLE_P4D_FOLDED
2052 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2053 						unsigned long address)
2054 {
2055 	return 0;
2056 }
2057 #else
2058 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2059 #endif
2060 
2061 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2062 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2063 						unsigned long address)
2064 {
2065 	return 0;
2066 }
2067 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2068 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2069 
2070 #else
2071 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2072 
2073 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2074 {
2075 	if (mm_pud_folded(mm))
2076 		return;
2077 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2078 }
2079 
2080 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2081 {
2082 	if (mm_pud_folded(mm))
2083 		return;
2084 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2085 }
2086 #endif
2087 
2088 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2089 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2090 						unsigned long address)
2091 {
2092 	return 0;
2093 }
2094 
2095 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2096 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2097 
2098 #else
2099 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2100 
2101 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2102 {
2103 	if (mm_pmd_folded(mm))
2104 		return;
2105 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2106 }
2107 
2108 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2109 {
2110 	if (mm_pmd_folded(mm))
2111 		return;
2112 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2113 }
2114 #endif
2115 
2116 #ifdef CONFIG_MMU
2117 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2118 {
2119 	atomic_long_set(&mm->pgtables_bytes, 0);
2120 }
2121 
2122 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2123 {
2124 	return atomic_long_read(&mm->pgtables_bytes);
2125 }
2126 
2127 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2128 {
2129 	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2130 }
2131 
2132 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2133 {
2134 	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2135 }
2136 #else
2137 
2138 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2139 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2140 {
2141 	return 0;
2142 }
2143 
2144 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2145 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2146 #endif
2147 
2148 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2149 int __pte_alloc_kernel(pmd_t *pmd);
2150 
2151 #if defined(CONFIG_MMU)
2152 
2153 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2154 		unsigned long address)
2155 {
2156 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2157 		NULL : p4d_offset(pgd, address);
2158 }
2159 
2160 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2161 		unsigned long address)
2162 {
2163 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2164 		NULL : pud_offset(p4d, address);
2165 }
2166 
2167 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2168 {
2169 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
2170 		NULL: pmd_offset(pud, address);
2171 }
2172 #endif /* CONFIG_MMU */
2173 
2174 #if USE_SPLIT_PTE_PTLOCKS
2175 #if ALLOC_SPLIT_PTLOCKS
2176 void __init ptlock_cache_init(void);
2177 extern bool ptlock_alloc(struct page *page);
2178 extern void ptlock_free(struct page *page);
2179 
2180 static inline spinlock_t *ptlock_ptr(struct page *page)
2181 {
2182 	return page->ptl;
2183 }
2184 #else /* ALLOC_SPLIT_PTLOCKS */
2185 static inline void ptlock_cache_init(void)
2186 {
2187 }
2188 
2189 static inline bool ptlock_alloc(struct page *page)
2190 {
2191 	return true;
2192 }
2193 
2194 static inline void ptlock_free(struct page *page)
2195 {
2196 }
2197 
2198 static inline spinlock_t *ptlock_ptr(struct page *page)
2199 {
2200 	return &page->ptl;
2201 }
2202 #endif /* ALLOC_SPLIT_PTLOCKS */
2203 
2204 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2205 {
2206 	return ptlock_ptr(pmd_page(*pmd));
2207 }
2208 
2209 static inline bool ptlock_init(struct page *page)
2210 {
2211 	/*
2212 	 * prep_new_page() initializes page->private (and therefore page->ptl)
2213 	 * with 0. Make sure nobody took it into use in between.
2214 	 *
2215 	 * That can happen if an arch tries to use slab for page table allocation:
2216 	 * slab code uses page->slab_cache, which shares storage with page->ptl.
2217 	 */
2218 	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
2219 	if (!ptlock_alloc(page))
2220 		return false;
2221 	spin_lock_init(ptlock_ptr(page));
2222 	return true;
2223 }
2224 
2225 #else	/* !USE_SPLIT_PTE_PTLOCKS */
2226 /*
2227  * We use mm->page_table_lock to guard all pagetable pages of the mm.
2228  */
2229 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2230 {
2231 	return &mm->page_table_lock;
2232 }
2233 static inline void ptlock_cache_init(void) {}
2234 static inline bool ptlock_init(struct page *page) { return true; }
2235 static inline void ptlock_free(struct page *page) {}
2236 #endif /* USE_SPLIT_PTE_PTLOCKS */
2237 
2238 static inline void pgtable_init(void)
2239 {
2240 	ptlock_cache_init();
2241 	pgtable_cache_init();
2242 }
2243 
2244 static inline bool pgtable_pte_page_ctor(struct page *page)
2245 {
2246 	if (!ptlock_init(page))
2247 		return false;
2248 	__SetPageTable(page);
2249 	inc_lruvec_page_state(page, NR_PAGETABLE);
2250 	return true;
2251 }
2252 
2253 static inline void pgtable_pte_page_dtor(struct page *page)
2254 {
2255 	ptlock_free(page);
2256 	__ClearPageTable(page);
2257 	dec_lruvec_page_state(page, NR_PAGETABLE);
2258 }
2259 
2260 #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
2261 ({							\
2262 	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
2263 	pte_t *__pte = pte_offset_map(pmd, address);	\
2264 	*(ptlp) = __ptl;				\
2265 	spin_lock(__ptl);				\
2266 	__pte;						\
2267 })
2268 
2269 #define pte_unmap_unlock(pte, ptl)	do {		\
2270 	spin_unlock(ptl);				\
2271 	pte_unmap(pte);					\
2272 } while (0)
2273 
2274 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2275 
2276 #define pte_alloc_map(mm, pmd, address)			\
2277 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2278 
2279 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
2280 	(pte_alloc(mm, pmd) ?			\
2281 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2282 
2283 #define pte_alloc_kernel(pmd, address)			\
2284 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2285 		NULL: pte_offset_kernel(pmd, address))
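
/*
 * Sketch of the usual locked PTE walk built from the macros above (assumes
 * the caller holds the mmap lock and has already validated the pmd):
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	entry = *pte;
 *	... inspect or update the entry while holding ptl ...
 *	pte_unmap_unlock(pte, ptl);
 *
 * pte_alloc_map_lock() follows the same pattern but first allocates the
 * page table if the pmd is still empty.
 */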
2286 
2287 #if USE_SPLIT_PMD_PTLOCKS
2288 
2289 static struct page *pmd_to_page(pmd_t *pmd)
2290 {
2291 	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2292 	return virt_to_page((void *)((unsigned long) pmd & mask));
2293 }
2294 
2295 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2296 {
2297 	return ptlock_ptr(pmd_to_page(pmd));
2298 }
2299 
2300 static inline bool pmd_ptlock_init(struct page *page)
2301 {
2302 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2303 	page->pmd_huge_pte = NULL;
2304 #endif
2305 	return ptlock_init(page);
2306 }
2307 
2308 static inline void pmd_ptlock_free(struct page *page)
2309 {
2310 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2311 	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
2312 #endif
2313 	ptlock_free(page);
2314 }
2315 
2316 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
2317 
2318 #else
2319 
2320 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2321 {
2322 	return &mm->page_table_lock;
2323 }
2324 
2325 static inline bool pmd_ptlock_init(struct page *page) { return true; }
2326 static inline void pmd_ptlock_free(struct page *page) {}
2327 
2328 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2329 
2330 #endif
2331 
2332 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2333 {
2334 	spinlock_t *ptl = pmd_lockptr(mm, pmd);
2335 	spin_lock(ptl);
2336 	return ptl;
2337 }
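
/*
 * Illustrative sketch: huge-pmd code takes the (possibly split) lock around
 * the pmd it modifies:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	... install or change a huge pmd entry ...
 *	spin_unlock(ptl);
 */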
2338 
2339 static inline bool pgtable_pmd_page_ctor(struct page *page)
2340 {
2341 	if (!pmd_ptlock_init(page))
2342 		return false;
2343 	__SetPageTable(page);
2344 	inc_lruvec_page_state(page, NR_PAGETABLE);
2345 	return true;
2346 }
2347 
2348 static inline void pgtable_pmd_page_dtor(struct page *page)
2349 {
2350 	pmd_ptlock_free(page);
2351 	__ClearPageTable(page);
2352 	dec_lruvec_page_state(page, NR_PAGETABLE);
2353 }
2354 
2355 /*
2356  * No scalability reason to split PUD locks yet, but follow the same pattern
2357  * as the PMD locks to make it easier if we decide to.  The VM should not be
2358  * considered ready to switch to split PUD locks yet; there may be places
2359  * which need to be converted from page_table_lock.
2360  */
2361 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2362 {
2363 	return &mm->page_table_lock;
2364 }
2365 
2366 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2367 {
2368 	spinlock_t *ptl = pud_lockptr(mm, pud);
2369 
2370 	spin_lock(ptl);
2371 	return ptl;
2372 }
2373 
2374 extern void __init pagecache_init(void);
2375 extern void __init free_area_init_memoryless_node(int nid);
2376 extern void free_initmem(void);
2377 
2378 /*
2379  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
2380  * into the buddy system. The freed pages will be poisoned with the pattern
2381  * "poison" if it is within the range [0, UCHAR_MAX].
2382  * Returns the number of pages freed into the buddy system.
2383  */
2384 extern unsigned long free_reserved_area(void *start, void *end,
2385 					int poison, const char *s);
2386 
2387 extern void adjust_managed_page_count(struct page *page, long count);
2388 extern void mem_init_print_info(void);
2389 
2390 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2391 
2392 /* Free the reserved page into the buddy system, so it gets managed. */
2393 static inline void free_reserved_page(struct page *page)
2394 {
2395 	ClearPageReserved(page);
2396 	init_page_count(page);
2397 	__free_page(page);
2398 	adjust_managed_page_count(page, 1);
2399 }
2400 #define free_highmem_page(page) free_reserved_page(page)
2401 
2402 static inline void mark_page_reserved(struct page *page)
2403 {
2404 	SetPageReserved(page);
2405 	adjust_managed_page_count(page, -1);
2406 }
2407 
2408 /*
2409  * Default method to free all the __init memory into the buddy system.
2410  * The freed pages will be poisoned with the pattern "poison" if it is within
2411  * the range [0, UCHAR_MAX].
2412  * Returns the number of pages freed into the buddy system.
2413  */
2414 static inline unsigned long free_initmem_default(int poison)
2415 {
2416 	extern char __init_begin[], __init_end[];
2417 
2418 	return free_reserved_area(&__init_begin, &__init_end,
2419 				  poison, "unused kernel image (initmem)");
2420 }
2421 
2422 static inline unsigned long get_num_physpages(void)
2423 {
2424 	int nid;
2425 	unsigned long phys_pages = 0;
2426 
2427 	for_each_online_node(nid)
2428 		phys_pages += node_present_pages(nid);
2429 
2430 	return phys_pages;
2431 }
2432 
2433 /*
2434  * Using memblock node mappings, an architecture may initialise its
2435  * zones, allocate the backing mem_map and account for memory holes in an
2436  * architecture independent manner.
2437  *
2438  * An architecture is expected to register ranges of page frames backed by
2439  * physical memory with memblock_add[_node]() before calling
2440  * free_area_init() and passing in the PFN each zone ends at. For basic
2441  * usage, an architecture is expected to do something like
2442  *
2443  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
2444  * 							 max_highmem_pfn};
2445  * for_each_valid_physical_page_range()
2446  * 	memblock_add_node(base, size, nid)
2447  * free_area_init(max_zone_pfns);
2448  */
2449 void free_area_init(unsigned long *max_zone_pfn);
2450 unsigned long node_map_pfn_alignment(void);
2451 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2452 						unsigned long end_pfn);
2453 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2454 						unsigned long end_pfn);
2455 extern void get_pfn_range_for_nid(unsigned int nid,
2456 			unsigned long *start_pfn, unsigned long *end_pfn);
2457 extern unsigned long find_min_pfn_with_active_regions(void);
2458 
2459 #ifndef CONFIG_NUMA
2460 static inline int early_pfn_to_nid(unsigned long pfn)
2461 {
2462 	return 0;
2463 }
2464 #else
2465 /* please see mm/page_alloc.c */
2466 extern int __meminit early_pfn_to_nid(unsigned long pfn);
2467 #endif
2468 
2469 extern void set_dma_reserve(unsigned long new_dma_reserve);
2470 extern void memmap_init_range(unsigned long, int, unsigned long,
2471 		unsigned long, unsigned long, enum meminit_context,
2472 		struct vmem_altmap *, int migratetype);
2473 extern void setup_per_zone_wmarks(void);
2474 extern int __meminit init_per_zone_wmark_min(void);
2475 extern void mem_init(void);
2476 extern void __init mmap_init(void);
2477 extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2478 extern long si_mem_available(void);
2479 extern void si_meminfo(struct sysinfo * val);
2480 extern void si_meminfo_node(struct sysinfo *val, int nid);
2481 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2482 extern unsigned long arch_reserved_kernel_pages(void);
2483 #endif
2484 
2485 extern __printf(3, 4)
2486 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2487 
2488 extern void setup_per_cpu_pageset(void);
2489 
2490 /* page_alloc.c */
2491 extern int min_free_kbytes;
2492 extern int watermark_boost_factor;
2493 extern int watermark_scale_factor;
2494 extern bool arch_has_descending_max_zone_pfns(void);
2495 
2496 /* nommu.c */
2497 extern atomic_long_t mmap_pages_allocated;
2498 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2499 
2500 /* interval_tree.c */
2501 void vma_interval_tree_insert(struct vm_area_struct *node,
2502 			      struct rb_root_cached *root);
2503 void vma_interval_tree_insert_after(struct vm_area_struct *node,
2504 				    struct vm_area_struct *prev,
2505 				    struct rb_root_cached *root);
2506 void vma_interval_tree_remove(struct vm_area_struct *node,
2507 			      struct rb_root_cached *root);
2508 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2509 				unsigned long start, unsigned long last);
2510 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2511 				unsigned long start, unsigned long last);
2512 
2513 #define vma_interval_tree_foreach(vma, root, start, last)		\
2514 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
2515 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
2516 
2517 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2518 				   struct rb_root_cached *root);
2519 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2520 				   struct rb_root_cached *root);
2521 struct anon_vma_chain *
2522 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2523 				  unsigned long start, unsigned long last);
2524 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2525 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
2526 #ifdef CONFIG_DEBUG_VM_RB
2527 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2528 #endif
2529 
2530 #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
2531 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2532 	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2533 
2534 /* mmap.c */
2535 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2536 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2537 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2538 	struct vm_area_struct *expand);
2539 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2540 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2541 {
2542 	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2543 }
2544 extern struct vm_area_struct *vma_merge(struct mm_struct *,
2545 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2546 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2547 	struct mempolicy *, struct vm_userfaultfd_ctx);
2548 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2549 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2550 	unsigned long addr, int new_below);
2551 extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2552 	unsigned long addr, int new_below);
2553 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2554 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2555 	struct rb_node **, struct rb_node *);
2556 extern void unlink_file_vma(struct vm_area_struct *);
2557 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2558 	unsigned long addr, unsigned long len, pgoff_t pgoff,
2559 	bool *need_rmap_locks);
2560 extern void exit_mmap(struct mm_struct *);
2561 
2562 static inline int check_data_rlimit(unsigned long rlim,
2563 				    unsigned long new,
2564 				    unsigned long start,
2565 				    unsigned long end_data,
2566 				    unsigned long start_data)
2567 {
2568 	if (rlim < RLIM_INFINITY) {
2569 		if (((new - start) + (end_data - start_data)) > rlim)
2570 			return -ENOSPC;
2571 	}
2572 
2573 	return 0;
2574 }
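
/*
 * Illustrative sketch, loosely modelled on the brk() path: callers compare
 * a proposed new data segment end against RLIMIT_DATA before committing it:
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		... refuse to move the break ...
 */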
2575 
2576 extern int mm_take_all_locks(struct mm_struct *mm);
2577 extern void mm_drop_all_locks(struct mm_struct *mm);
2578 
2579 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2580 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2581 extern struct file *get_task_exe_file(struct task_struct *task);
2582 
2583 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2584 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2585 
2586 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2587 				   const struct vm_special_mapping *sm);
2588 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2589 				   unsigned long addr, unsigned long len,
2590 				   unsigned long flags,
2591 				   const struct vm_special_mapping *spec);
2592 /* This is an obsolete alternative to _install_special_mapping. */
2593 extern int install_special_mapping(struct mm_struct *mm,
2594 				   unsigned long addr, unsigned long len,
2595 				   unsigned long flags, struct page **pages);
2596 
2597 unsigned long randomize_stack_top(unsigned long stack_top);
2598 
2599 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2600 
2601 extern unsigned long mmap_region(struct file *file, unsigned long addr,
2602 	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2603 	struct list_head *uf);
2604 extern unsigned long do_mmap(struct file *file, unsigned long addr,
2605 	unsigned long len, unsigned long prot, unsigned long flags,
2606 	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
2607 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
2608 		       struct list_head *uf, bool downgrade);
2609 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2610 		     struct list_head *uf);
2611 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
2612 
2613 #ifdef CONFIG_MMU
2614 extern int __mm_populate(unsigned long addr, unsigned long len,
2615 			 int ignore_errors);
2616 static inline void mm_populate(unsigned long addr, unsigned long len)
2617 {
2618 	/* Ignore errors */
2619 	(void) __mm_populate(addr, len, 1);
2620 }
2621 #else
2622 static inline void mm_populate(unsigned long addr, unsigned long len) {}
2623 #endif
2624 
2625 /* These take the mm semaphore themselves */
2626 extern int __must_check vm_brk(unsigned long, unsigned long);
2627 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2628 extern int vm_munmap(unsigned long, size_t);
2629 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2630         unsigned long, unsigned long,
2631         unsigned long, unsigned long);
2632 
2633 struct vm_unmapped_area_info {
2634 #define VM_UNMAPPED_AREA_TOPDOWN 1
2635 	unsigned long flags;
2636 	unsigned long length;
2637 	unsigned long low_limit;
2638 	unsigned long high_limit;
2639 	unsigned long align_mask;
2640 	unsigned long align_offset;
2641 };
2642 
2643 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
2644 
2645 /* truncate.c */
2646 extern void truncate_inode_pages(struct address_space *, loff_t);
2647 extern void truncate_inode_pages_range(struct address_space *,
2648 				       loff_t lstart, loff_t lend);
2649 extern void truncate_inode_pages_final(struct address_space *);
2650 
2651 /* generic vm_area_ops exported for stackable file systems */
2652 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
2653 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
2654 		pgoff_t start_pgoff, pgoff_t end_pgoff);
2655 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
2656 
2657 /* mm/page-writeback.c */
2658 int __must_check write_one_page(struct page *page);
2659 void task_dirty_inc(struct task_struct *tsk);
2660 
2661 extern unsigned long stack_guard_gap;
2662 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2663 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2664 
2665 /* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
2666 extern int expand_downwards(struct vm_area_struct *vma,
2667 		unsigned long address);
2668 #if VM_GROWSUP
2669 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2670 #else
2671   #define expand_upwards(vma, address) (0)
2672 #endif
2673 
2674 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2675 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2676 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2677 					     struct vm_area_struct **pprev);
2678 
2679 /**
2680  * find_vma_intersection() - Look up the first VMA which intersects the interval
2681  * @mm: The process address space.
2682  * @start_addr: The inclusive start user address.
2683  * @end_addr: The exclusive end user address.
2684  *
2685  * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
2686  * start_addr < end_addr.
2687  */
2688 static inline
2689 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
2690 					     unsigned long start_addr,
2691 					     unsigned long end_addr)
2692 {
2693 	struct vm_area_struct *vma = find_vma(mm, start_addr);
2694 
2695 	if (vma && end_addr <= vma->vm_start)
2696 		vma = NULL;
2697 	return vma;
2698 }
2699 
2700 /**
2701  * vma_lookup() - Find a VMA at a specific address
2702  * @mm: The process address space.
2703  * @addr: The user address.
2704  *
2705  * Return: The vm_area_struct at the given address, %NULL otherwise.
2706  */
2707 static inline
2708 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
2709 {
2710 	struct vm_area_struct *vma = find_vma(mm, addr);
2711 
2712 	if (vma && addr < vma->vm_start)
2713 		vma = NULL;
2714 
2715 	return vma;
2716 }
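
/*
 * Usage sketch (the caller must hold the mmap lock, at least in read mode,
 * for the result to remain valid):
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	if (vma)
 *		... addr lies within [vma->vm_start, vma->vm_end) ...
 *	mmap_read_unlock(mm);
 */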
2717 
2718 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2719 {
2720 	unsigned long vm_start = vma->vm_start;
2721 
2722 	if (vma->vm_flags & VM_GROWSDOWN) {
2723 		vm_start -= stack_guard_gap;
2724 		if (vm_start > vma->vm_start)
2725 			vm_start = 0;
2726 	}
2727 	return vm_start;
2728 }
2729 
2730 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2731 {
2732 	unsigned long vm_end = vma->vm_end;
2733 
2734 	if (vma->vm_flags & VM_GROWSUP) {
2735 		vm_end += stack_guard_gap;
2736 		if (vm_end < vma->vm_end)
2737 			vm_end = -PAGE_SIZE;
2738 	}
2739 	return vm_end;
2740 }
2741 
2742 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2743 {
2744 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2745 }
2746 
2747 /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
2748 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2749 				unsigned long vm_start, unsigned long vm_end)
2750 {
2751 	struct vm_area_struct *vma = find_vma(mm, vm_start);
2752 
2753 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2754 		vma = NULL;
2755 
2756 	return vma;
2757 }
2758 
2759 static inline bool range_in_vma(struct vm_area_struct *vma,
2760 				unsigned long start, unsigned long end)
2761 {
2762 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
2763 }
2764 
2765 #ifdef CONFIG_MMU
2766 pgprot_t vm_get_page_prot(unsigned long vm_flags);
2767 void vma_set_page_prot(struct vm_area_struct *vma);
2768 #else
2769 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2770 {
2771 	return __pgprot(0);
2772 }
2773 static inline void vma_set_page_prot(struct vm_area_struct *vma)
2774 {
2775 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2776 }
2777 #endif
2778 
2779 void vma_set_file(struct vm_area_struct *vma, struct file *file);
2780 
2781 #ifdef CONFIG_NUMA_BALANCING
2782 unsigned long change_prot_numa(struct vm_area_struct *vma,
2783 			unsigned long start, unsigned long end);
2784 #endif
2785 
2786 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2787 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2788 			unsigned long pfn, unsigned long size, pgprot_t);
2789 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2790 		unsigned long pfn, unsigned long size, pgprot_t prot);
2791 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2792 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2793 			struct page **pages, unsigned long *num);
2794 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2795 				unsigned long num);
2796 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2797 				unsigned long num);
2798 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2799 			unsigned long pfn);
2800 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2801 			unsigned long pfn, pgprot_t pgprot);
2802 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2803 			pfn_t pfn);
2804 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2805 			pfn_t pfn, pgprot_t pgprot);
2806 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2807 		unsigned long addr, pfn_t pfn);
2808 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2809 
2810 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
2811 				unsigned long addr, struct page *page)
2812 {
2813 	int err = vm_insert_page(vma, addr, page);
2814 
2815 	if (err == -ENOMEM)
2816 		return VM_FAULT_OOM;
2817 	if (err < 0 && err != -EBUSY)
2818 		return VM_FAULT_SIGBUS;
2819 
2820 	return VM_FAULT_NOPAGE;
2821 }
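
/*
 * Illustrative sketch of a driver ->fault handler built on this wrapper;
 * my_drv_lookup_page() is a hypothetical helper that resolves vmf->pgoff to
 * a driver-owned page:
 *
 *	static vm_fault_t my_drv_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_drv_lookup_page(vmf);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */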
2822 
2823 #ifndef io_remap_pfn_range
2824 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
2825 				     unsigned long addr, unsigned long pfn,
2826 				     unsigned long size, pgprot_t prot)
2827 {
2828 	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
2829 }
2830 #endif
2831 
2832 static inline vm_fault_t vmf_error(int err)
2833 {
2834 	if (err == -ENOMEM)
2835 		return VM_FAULT_OOM;
2836 	return VM_FAULT_SIGBUS;
2837 }
2838 
2839 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
2840 			 unsigned int foll_flags);
2841 
2842 #define FOLL_WRITE	0x01	/* check pte is writable */
2843 #define FOLL_TOUCH	0x02	/* mark page accessed */
2844 #define FOLL_GET	0x04	/* do get_page on page */
2845 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
2846 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
2847 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
2848 				 * and return without waiting upon it */
2849 #define FOLL_POPULATE	0x40	/* fault in page */
2850 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
2851 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
2852 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
2853 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
2854 #define FOLL_MLOCK	0x1000	/* lock present pages */
2855 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
2856 #define FOLL_COW	0x4000	/* internal GUP flag */
2857 #define FOLL_ANON	0x8000	/* don't do file mappings */
2858 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
2859 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
2860 #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
2861 #define FOLL_FAST_ONLY	0x80000	/* gup_fast: prevent fall-back to slow gup */
2862 
2863 /*
2864  * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
2865  * other. Here is what they mean, and how to use them:
2866  *
2867  * FOLL_LONGTERM indicates that the page will be held for an indefinite time
2868  * period _often_ under userspace control.  This is in contrast to
2869  * iov_iter_get_pages(), whose usages are transient.
2870  *
2871  * FIXME: For pages which are part of a filesystem, mappings are subject to the
2872  * lifetime enforced by the filesystem and we need guarantees that longterm
2873  * users like RDMA and V4L2 only establish mappings which coordinate usage with
2874  * the filesystem.  Ideas for this coordination include revoking the longterm
2875  * pin, delaying writeback, bounce buffer page writeback, etc.  As FS DAX was
2876  * added after the problem with filesystems was found FS DAX VMAs are
2877  * specifically failed.  Filesystem pages are still subject to bugs and use of
2878  * FOLL_LONGTERM should be avoided on those pages.
2879  *
2880  * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
2881  * Currently only get_user_pages() and get_user_pages_fast() support this flag
2882  * and calls to get_user_pages_[un]locked are specifically not allowed.  This
2883  * is due to an incompatibility with the FS DAX check and
2884  * FAULT_FLAG_ALLOW_RETRY.
2885  *
2886  * In the CMA case: long term pins in a CMA region would unnecessarily fragment
2887  * that region.  And so, CMA attempts to migrate the page before pinning, when
2888  * FOLL_LONGTERM is specified.
2889  *
2890  * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
2891  * but an additional pin counting system) will be invoked. This is intended for
2892  * anything that gets a page reference and then touches page data (for example,
2893  * Direct IO). This lets the filesystem know that some non-file-system entity is
2894  * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
2895  * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
2896  * a call to unpin_user_page().
2897  *
2898  * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
2899  * and separate refcounting mechanisms, however, and that means that each has
2900  * its own acquire and release mechanisms:
2901  *
2902  *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
2903  *
2904  *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages() to release.
2905  *
2906  * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
2907  * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
2908  * calls applied to them, and that's perfectly OK. This is a constraint on the
2909  * callers, not on the pages.)
2910  *
2911  * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
2912  * directly by the caller. That's in order to help avoid mismatches when
2913  * releasing pages: get_user_pages*() pages must be released via put_page(),
2914  * while pin_user_pages*() pages must be released via unpin_user_page().
2915  *
2916  * Please see Documentation/core-api/pin_user_pages.rst for more information.
2917  */
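
/*
 * Sketch of the FOLL_PIN lifecycle for Direct-IO-style access to user
 * memory (illustrative only, error handling elided):
 *
 *	npinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
 *	... read or write the page contents ...
 *	for (i = 0; i < npinned; i++)
 *		unpin_user_page(pages[i]);
 *
 * The corresponding FOLL_GET pattern uses get_user_pages_fast() to acquire
 * and put_page() to release.
 */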
2918 
2919 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
2920 {
2921 	if (vm_fault & VM_FAULT_OOM)
2922 		return -ENOMEM;
2923 	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2924 		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2925 	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2926 		return -EFAULT;
2927 	return 0;
2928 }
2929 
2930 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
2931 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2932 			       unsigned long size, pte_fn_t fn, void *data);
2933 extern int apply_to_existing_page_range(struct mm_struct *mm,
2934 				   unsigned long address, unsigned long size,
2935 				   pte_fn_t fn, void *data);
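
/*
 * Illustrative sketch: the callback runs for every pte in the range
 * (apply_to_page_range() also allocates missing page tables, whereas the
 * _existing_ variant does not). count_present_ptes() is a hypothetical
 * example callback:
 *
 *	static int count_present_ptes(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(&init_mm, addr, size, count_present_ptes, &count);
 */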
2936 
2937 extern void init_mem_debugging_and_hardening(void);
2938 #ifdef CONFIG_PAGE_POISONING
2939 extern void __kernel_poison_pages(struct page *page, int numpages);
2940 extern void __kernel_unpoison_pages(struct page *page, int numpages);
2941 extern bool _page_poisoning_enabled_early;
2942 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
2943 static inline bool page_poisoning_enabled(void)
2944 {
2945 	return _page_poisoning_enabled_early;
2946 }
2947 /*
2948  * For use in fast paths after init_mem_debugging() has run, or when a
2949  * false negative result is not harmful when called too early.
2950  */
2951 static inline bool page_poisoning_enabled_static(void)
2952 {
2953 	return static_branch_unlikely(&_page_poisoning_enabled);
2954 }
2955 static inline void kernel_poison_pages(struct page *page, int numpages)
2956 {
2957 	if (page_poisoning_enabled_static())
2958 		__kernel_poison_pages(page, numpages);
2959 }
2960 static inline void kernel_unpoison_pages(struct page *page, int numpages)
2961 {
2962 	if (page_poisoning_enabled_static())
2963 		__kernel_unpoison_pages(page, numpages);
2964 }
2965 #else
2966 static inline bool page_poisoning_enabled(void) { return false; }
2967 static inline bool page_poisoning_enabled_static(void) { return false; }
2968 static inline void __kernel_poison_pages(struct page *page, int numpages) { }
2969 static inline void kernel_poison_pages(struct page *page, int numpages) { }
2970 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
2971 #endif
2972 
2973 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2974 static inline bool want_init_on_alloc(gfp_t flags)
2975 {
2976 	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
2977 				&init_on_alloc))
2978 		return true;
2979 	return flags & __GFP_ZERO;
2980 }
2981 
2982 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2983 static inline bool want_init_on_free(void)
2984 {
2985 	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
2986 				   &init_on_free);
2987 }
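
/*
 * Usage sketch: allocators consult these helpers rather than reading the
 * static keys directly:
 *
 *	if (want_init_on_alloc(gfp_flags))
 *		... zero the object at allocation time ...
 *	if (want_init_on_free())
 *		... zero the object as it is freed ...
 */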
2988 
2989 extern bool _debug_pagealloc_enabled_early;
2990 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
2991 
2992 static inline bool debug_pagealloc_enabled(void)
2993 {
2994 	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
2995 		_debug_pagealloc_enabled_early;
2996 }
2997 
2998 /*
2999  * For use in fast paths after init_debug_pagealloc() has run, or when a
3000  * false negative result is not harmful when called too early.
3001  */
3002 static inline bool debug_pagealloc_enabled_static(void)
3003 {
3004 	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3005 		return false;
3006 
3007 	return static_branch_unlikely(&_debug_pagealloc_enabled);
3008 }
3009 
3010 #ifdef CONFIG_DEBUG_PAGEALLOC
3011 /*
3012  * To support DEBUG_PAGEALLOC, the architecture must ensure that
3013  * __kernel_map_pages() never fails.
3014  */
3015 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3016 
3017 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3018 {
3019 	if (debug_pagealloc_enabled_static())
3020 		__kernel_map_pages(page, numpages, 1);
3021 }
3022 
3023 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3024 {
3025 	if (debug_pagealloc_enabled_static())
3026 		__kernel_map_pages(page, numpages, 0);
3027 }
3028 #else	/* CONFIG_DEBUG_PAGEALLOC */
3029 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3030 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3031 #endif	/* CONFIG_DEBUG_PAGEALLOC */
3032 
3033 #ifdef __HAVE_ARCH_GATE_AREA
3034 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3035 extern int in_gate_area_no_mm(unsigned long addr);
3036 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3037 #else
3038 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3039 {
3040 	return NULL;
3041 }
3042 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3043 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3044 {
3045 	return 0;
3046 }
3047 #endif	/* __HAVE_ARCH_GATE_AREA */
3048 
3049 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3050 
3051 #ifdef CONFIG_SYSCTL
3052 extern int sysctl_drop_caches;
3053 int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
3054 		loff_t *);
3055 #endif
3056 
3057 void drop_slab(void);
3058 void drop_slab_node(int nid);
3059 
3060 #ifndef CONFIG_MMU
3061 #define randomize_va_space 0
3062 #else
3063 extern int randomize_va_space;
3064 #endif
3065 
3066 const char * arch_vma_name(struct vm_area_struct *vma);
3067 #ifdef CONFIG_MMU
3068 void print_vma_addr(char *prefix, unsigned long rip);
3069 #else
3070 static inline void print_vma_addr(char *prefix, unsigned long rip)
3071 {
3072 }
3073 #endif
3074 
3075 int vmemmap_remap_free(unsigned long start, unsigned long end,
3076 		       unsigned long reuse);
3077 int vmemmap_remap_alloc(unsigned long start, unsigned long end,
3078 			unsigned long reuse, gfp_t gfp_mask);
3079 
3080 void *sparse_buffer_alloc(unsigned long size);
3081 struct page * __populate_section_memmap(unsigned long pfn,
3082 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
3083 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3084 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3085 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3086 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3087 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3088 			    struct vmem_altmap *altmap);
3089 void *vmemmap_alloc_block(unsigned long size, int node);
3090 struct vmem_altmap;
3091 void *vmemmap_alloc_block_buf(unsigned long size, int node,
3092 			      struct vmem_altmap *altmap);
3093 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3094 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3095 			       int node, struct vmem_altmap *altmap);
3096 int vmemmap_populate(unsigned long start, unsigned long end, int node,
3097 		struct vmem_altmap *altmap);
3098 void vmemmap_populate_print_last(void);
3099 #ifdef CONFIG_MEMORY_HOTPLUG
3100 void vmemmap_free(unsigned long start, unsigned long end,
3101 		struct vmem_altmap *altmap);
3102 #endif
3103 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3104 				  unsigned long nr_pages);
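/*
 * Illustrative sketch (an assumption about a typical caller, not taken from
 * this header): an architecture with no special vmemmap mapping support can
 * implement vmemmap_populate() by falling back to base pages, which walks the
 * vmemmap_{pgd,p4d,pud,pmd,pte}_populate() helpers for the whole range.
 *
 *	int vmemmap_populate(unsigned long start, unsigned long end, int node,
 *			     struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */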
3105 
3106 enum mf_flags {
3107 	MF_COUNT_INCREASED = 1 << 0,
3108 	MF_ACTION_REQUIRED = 1 << 1,
3109 	MF_MUST_KILL = 1 << 2,
3110 	MF_SOFT_OFFLINE = 1 << 3,
3111 };
3112 extern int memory_failure(unsigned long pfn, int flags);
3113 extern void memory_failure_queue(unsigned long pfn, int flags);
3114 extern void memory_failure_queue_kick(int cpu);
3115 extern int unpoison_memory(unsigned long pfn);
3116 extern int sysctl_memory_failure_early_kill;
3117 extern int sysctl_memory_failure_recovery;
3118 extern void shake_page(struct page *p, int access);
3119 extern atomic_long_t num_poisoned_pages __read_mostly;
3120 extern int soft_offline_page(unsigned long pfn, int flags);
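/*
 * Illustrative sketch (hypothetical error-reporting driver, not part of this
 * header): code that detects an uncorrectable memory error reports the
 * affected pfn here; MF_ACTION_REQUIRED is used for synchronously consumed
 * errors, and memory_failure_queue() defers the work to process context.
 * example_handle_uc_error() is a made-up name.
 *
 *	static void example_handle_uc_error(unsigned long pfn, bool synchronous)
 *	{
 *		if (synchronous)
 *			memory_failure(pfn, MF_ACTION_REQUIRED);
 *		else
 *			memory_failure_queue(pfn, 0);
 *	}
 */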
3121 
3122 
3123 /*
3124  * Error handlers for various types of pages.
3125  */
3126 enum mf_result {
3127 	MF_IGNORED,	/* Error: cannot be handled */
3128 	MF_FAILED,	/* Error: handling failed */
3129 	MF_DELAYED,	/* Will be handled later */
3130 	MF_RECOVERED,	/* Successfully recovered */
3131 };
3132 
3133 enum mf_action_page_type {
3134 	MF_MSG_KERNEL,
3135 	MF_MSG_KERNEL_HIGH_ORDER,
3136 	MF_MSG_SLAB,
3137 	MF_MSG_DIFFERENT_COMPOUND,
3138 	MF_MSG_POISONED_HUGE,
3139 	MF_MSG_HUGE,
3140 	MF_MSG_FREE_HUGE,
3141 	MF_MSG_NON_PMD_HUGE,
3142 	MF_MSG_UNMAP_FAILED,
3143 	MF_MSG_DIRTY_SWAPCACHE,
3144 	MF_MSG_CLEAN_SWAPCACHE,
3145 	MF_MSG_DIRTY_MLOCKED_LRU,
3146 	MF_MSG_CLEAN_MLOCKED_LRU,
3147 	MF_MSG_DIRTY_UNEVICTABLE_LRU,
3148 	MF_MSG_CLEAN_UNEVICTABLE_LRU,
3149 	MF_MSG_DIRTY_LRU,
3150 	MF_MSG_CLEAN_LRU,
3151 	MF_MSG_TRUNCATED_LRU,
3152 	MF_MSG_BUDDY,
3153 	MF_MSG_BUDDY_2ND,
3154 	MF_MSG_DAX,
3155 	MF_MSG_UNSPLIT_THP,
3156 	MF_MSG_UNKNOWN,
3157 };
3158 
3159 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
3160 extern void clear_huge_page(struct page *page,
3161 			    unsigned long addr_hint,
3162 			    unsigned int pages_per_huge_page);
3163 extern void copy_user_huge_page(struct page *dst, struct page *src,
3164 				unsigned long addr_hint,
3165 				struct vm_area_struct *vma,
3166 				unsigned int pages_per_huge_page);
3167 extern long copy_huge_page_from_user(struct page *dst_page,
3168 				const void __user *usr_src,
3169 				unsigned int pages_per_huge_page,
3170 				bool allow_pagefault);
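/*
 * Illustrative sketch (hypothetical fault path, not part of this header):
 * addr_hint tells clear_huge_page() and copy_user_huge_page() which subpage
 * the fault hit, so that subpage is processed last and is still cache-hot
 * when the faulting task resumes.  The example_ name is made up.
 *
 *	static void example_hugetlb_fill_new_page(struct vm_area_struct *vma,
 *						  struct page *page,
 *						  unsigned long fault_address)
 *	{
 *		clear_huge_page(page, fault_address,
 *				pages_per_huge_page(hstate_vma(vma)));
 *	}
 */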
3171 
3172 /**
3173  * vma_is_special_huge - Are transhuge page-table entries considered special?
3174  * @vma: Pointer to the struct vm_area_struct to consider
3175  *
3176  * Whether transhuge page-table entries are considered "special" following
3177  * the definition in vm_normal_page().
3178  *
3179  * Return: true if transhuge page-table entries should be considered special,
3180  * false otherwise.
3181  */
3182 static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
3183 {
3184 	return vma_is_dax(vma) || (vma->vm_file &&
3185 				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
3186 }
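/*
 * Illustrative sketch (hypothetical caller, an assumption): huge-page zap or
 * split paths can use vma_is_special_huge() to decide whether the entry maps
 * ordinary refcounted pages or special PFN/mixed-map memory that needs no
 * struct page handling.  The example_ name is made up.
 *
 *	static void example_release_huge_entry(struct vm_area_struct *vma,
 *					       struct page *page)
 *	{
 *		if (!vma_is_special_huge(vma))
 *			put_page(page);
 *	}
 */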
3187 
3188 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
3189 
3190 #ifdef CONFIG_DEBUG_PAGEALLOC
3191 extern unsigned int _debug_guardpage_minorder;
3192 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3193 
3194 static inline unsigned int debug_guardpage_minorder(void)
3195 {
3196 	return _debug_guardpage_minorder;
3197 }
3198 
3199 static inline bool debug_guardpage_enabled(void)
3200 {
3201 	return static_branch_unlikely(&_debug_guardpage_enabled);
3202 }
3203 
3204 static inline bool page_is_guard(struct page *page)
3205 {
3206 	if (!debug_guardpage_enabled())
3207 		return false;
3208 
3209 	return PageGuard(page);
3210 }
3211 #else
3212 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3213 static inline bool debug_guardpage_enabled(void) { return false; }
3214 static inline bool page_is_guard(struct page *page) { return false; }
3215 #endif /* CONFIG_DEBUG_PAGEALLOC */
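/*
 * Illustrative sketch (hypothetical allocator check, not part of this header):
 * when debug_guardpage is enabled the buddy allocator keeps guard pages off
 * the free lists, so merge/expand paths treat a guard page much like a free
 * buddy.  The example_ name is made up.
 *
 *	static bool example_buddy_is_free_or_guard(struct page *buddy)
 *	{
 *		return page_is_guard(buddy) || PageBuddy(buddy);
 *	}
 */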
3216 
3217 #if MAX_NUMNODES > 1
3218 void __init setup_nr_node_ids(void);
3219 #else
3220 static inline void setup_nr_node_ids(void) {}
3221 #endif
3222 
3223 extern int memcmp_pages(struct page *page1, struct page *page2);
3224 
3225 static inline int pages_identical(struct page *page1, struct page *page2)
3226 {
3227 	return !memcmp_pages(page1, page2);
3228 }
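/*
 * Illustrative sketch (hypothetical deduplication check, an assumption): code
 * that merges equal pages, KSM-style, can use pages_identical() to confirm
 * two candidates still have the same contents before replacing one mapping
 * with the other.
 *
 *	static bool example_can_merge(struct page *a, struct page *b)
 *	{
 *		return pages_identical(a, b);
 *	}
 */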
3229 
3230 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
3231 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
3232 						pgoff_t first_index, pgoff_t nr,
3233 						pgoff_t bitmap_pgoff,
3234 						unsigned long *bitmap,
3235 						pgoff_t *start,
3236 						pgoff_t *end);
3237 
3238 unsigned long wp_shared_mapping_range(struct address_space *mapping,
3239 				      pgoff_t first_index, pgoff_t nr);
3240 #endif
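/*
 * Illustrative sketch (hypothetical driver code, not part of this header): a
 * driver tracking writes to a shared mapping might first write-protect the
 * range and later record which pages were dirtied into a bitmap.  All
 * example_ names and the bitmap_pgoff choice are assumptions.
 *
 *	static void example_collect_dirty(struct address_space *mapping,
 *					  pgoff_t first, pgoff_t nr,
 *					  unsigned long *bitmap)
 *	{
 *		pgoff_t start = 0, end = nr;
 *
 *		wp_shared_mapping_range(mapping, first, nr);
 *		clean_record_shared_mapping_range(mapping, first, nr, first,
 *						  bitmap, &start, &end);
 *	}
 */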
3241 
3242 extern int sysctl_nr_trim_pages;
3243 
3244 #ifdef CONFIG_PRINTK
3245 void mem_dump_obj(void *object);
3246 #else
3247 static inline void mem_dump_obj(void *object) {}
3248 #endif
3249 
3250 /**
3251  * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
3252  * @seals: the seals to check
3253  * @vma: the vma to operate on
3254  *
3255  * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
3256  * the vma flags.  Return 0 if the check passes, or <0 on error.
3257  */
3258 static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
3259 {
3260 	if (seals & F_SEAL_FUTURE_WRITE) {
3261 		/*
3262 		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
3263 		 * the "future write" seal is active.
3264 		 */
3265 		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
3266 			return -EPERM;
3267 
3268 		/*
3269 		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
3270 		 * MAP_SHARED and read-only, take care to not allow mprotect to
3271 		 * revert protections on such mappings. Do this only for shared
3272 		 * mappings. For private mappings, we don't need to mask
3273 		 * VM_MAYWRITE as we still want them to be COW-writable.
3274 		 */
3275 		if (vma->vm_flags & VM_SHARED)
3276 			vma->vm_flags &= ~(VM_MAYWRITE);
3277 	}
3278 
3279 	return 0;
3280 }
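/*
 * Illustrative sketch (hypothetical ->mmap() handler, not part of this
 * header): a memfd-backed filesystem would call this before completing the
 * mapping so a write-sealed memfd cannot gain writable shared mappings.
 * example_memfd_mmap() and example_file_seals() are made-up names.
 *
 *	static int example_memfd_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		int ret = seal_check_future_write(example_file_seals(file), vma);
 *
 *		if (ret)
 *			return ret;
 *		return generic_file_mmap(file, vma);
 *	}
 */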
3281 
3282 #endif /* __KERNEL__ */
3283 #endif /* _LINUX_MM_H */
3284