/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/memremap.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void init_mm_internals(void);

#ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

extern void * high_memory;
extern int page_cluster;
extern const int page_cluster_max;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/processor.h>
/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 * It's defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * Allows an architecture to prevent common memory management code from
 * establishing a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * If an architecture decides to implement its own version of
 * mm_zero_struct_page, it should wrap the defines below in an #ifndef and
 * define its own version of this macro in <asm/pgtable.h>.
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 96
 * or shrinks below 56. The idea is that the compiler optimizes out the
 * switch() statement and leaves only move/store instructions. The compiler
 * can also combine write statements if they are both assignments and can be
 * reordered; this can result in several of the writes here being dropped.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 96);

	switch (sizeof(struct page)) {
	case 96:
		_pp[11] = 0;
		fallthrough;
	case 88:
		_pp[10] = 0;
		fallthrough;
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif
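
/*
 * Worked example (illustrative sketch, not part of the original header):
 * with a 64-byte struct page, the switch above enters at "case 64" and
 * falls through, storing zero to _pp[7] and then _pp[6].._pp[0] - eight
 * 8-byte words, i.e. exactly sizeof(struct page) == 64 bytes, with no
 * call to memset().
 */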

/*
 * Default maximum number of active map areas; this limits the number of
 * VMAs per mm_struct. Users can override this number via sysctl, but there
 * is a caveat.
 *
 * When a program's coredump is generated as ELF format, one section is
 * created per VMA. In ELF, the number of sections is represented as an
 * unsigned short. This means the number of sections must stay below 65535
 * when dumping core. Because the kernel adds some informative sections to
 * the program image when generating a coredump, we need some margin. The
 * number of extra sections is currently 1-3 and depends on the arch; we
 * use 5 as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools can be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
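
/*
 * Worked example (illustrative, not part of the original header):
 * USHRT_MAX is 65535, so DEFAULT_MAX_MAP_COUNT is 65535 - 5 = 65530,
 * leaving room for the extra sections the kernel adds to a coredump.
 */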

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
#else
#define nth_page(page,n) ((page) + (n))
#define folio_page_idx(folio, p)	((p) - &(folio)->page)
#endif

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* to align the pointer to the (prev) page boundary */
#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
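
/*
 * Worked example (illustrative, not part of the original header),
 * assuming a 4 KiB PAGE_SIZE:
 *
 *	PAGE_ALIGN(0x1234)	== 0x2000	(round up)
 *	PAGE_ALIGN_DOWN(0x1234)	== 0x1000	(round down)
 *	PAGE_ALIGNED(0x2000)	== true
 *	PAGE_ALIGNED(0x1234)	== false
 */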

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
static inline struct folio *lru_to_folio(struct list_head *head)
{
	return list_entry((head)->prev, struct folio, lru);
}

void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64   */
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4  VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4  0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
# define VM_ARCH_CLEAR	VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_0	/* Use Tagged memory for access control */
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_1	/* Tagged memory permitted */
#else
# define VM_MTE		VM_NONE
# define VM_MTE_ALLOWED	VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define VM_UFFD_MINOR_BIT	37
# define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR		VM_NONE
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask prevents a VMA from being scanned by khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */

/*
 * The default fault flags that should be used by most of the
 * arch-specific page fault handlers.
 */
#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
			     FAULT_FLAG_KILLABLE | \
			     FAULT_FLAG_INTERRUPTIBLE)

/**
 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 * @flags: Fault flags.
 *
 * This is mostly used in places where we want to avoid taking the
 * mmap_lock for too long a time while waiting for another condition
 * to change, in which case we can be polite and release the mmap_lock
 * in the first round to avoid potential starvation of other processes
 * that would also want the mmap_lock.
 *
 * Return: true if the page fault allows retry and this is the first
 * attempt of the fault handling; false otherwise.
 */
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
}
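
/*
 * Usage sketch (illustrative, not part of the original header): a fault
 * handler that wants to wait for a condition can drop the lock only on
 * the first attempt, e.g.:
 *
 *	if (fault_flag_allow_retry_first(vmf->flags) &&
 *	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 *		mmap_read_unlock(vmf->vma->vm_mm);
 *		// ...wait for the condition to change...
 *		return VM_FAULT_RETRY;
 *	}
 */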

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }

/*
 * vm_fault is filled in by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * The MM layer fills in gfp_mask for page allocations, but the fault handler
 * might alter it if its implementation requires a different allocation
 * context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	const struct {
		struct vm_area_struct *vma;	/* Target VMA */
		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address - masked */
		unsigned long real_address;	/* Faulting virtual address - unmasked */
	};
	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
					 * XXX: should really be 'const' */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	union {
		pte_t orig_pte;		/* Value of PTE at the time of fault */
		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
					 * used by PMD fault only.
					 */
	};

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() sets up a page
					 * table from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.   The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS. */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;

	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses vma_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return vma_find(vmi, ULONG_MAX);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
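
/*
 * Usage sketch (illustrative, not part of the original header), assuming
 * the VMA_ITERATOR() helper from <linux/mm_types.h> and a caller that
 * already holds mmap_lock for reading:
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	for_each_vma(vmi, vma)
 *		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 */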

#ifdef CONFIG_SHMEM
/*
 * vma_is_shmem() is not inline because it is used only by slow
 * paths in userfaultfd.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
bool vma_is_anon_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

/**
 * folio_order - The allocation order of a folio.
 * @folio: The folio.
 *
 * A folio is composed of 2^order pages.  See get_order() for the definition
 * of order.
 *
 * Return: The order of the folio.
 */
static inline unsigned int folio_order(struct folio *folio)
{
	if (!folio_test_large(folio))
		return 0;
	return folio->_folio_order;
}
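
/*
 * Worked example (illustrative, not part of the original header): an
 * order-2 folio is composed of 2^2 = 4 contiguous pages; a single page
 * is an order-0 folio, for which folio_order() returns 0.
 */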

#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

static inline int folio_put_testzero(struct folio *folio)
{
	return put_page_testzero(&folio->page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 * This can be called when the MMU is off, so it must not access
 * any of the virtual mappings.
 */
static inline bool get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

/*
 * How many times the entire folio is mapped as a single unit (e.g. by a
 * PMD or PUD entry).  This is probably not what you want, except for
 * debugging purposes - it does not include PTE-mapped sub-pages; look
 * at folio_mapcount() or page_mapcount() or total_mapcount() instead.
 */
static inline int folio_entire_mapcount(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	return atomic_read(folio_mapcount_ptr(folio)) + 1;
}

/*
 * Mapcount of a compound page as a whole; it does not include mapped
 * sub-pages.  Must be called only on the head of a compound page.
 */
static inline int head_compound_mapcount(struct page *head)
{
	return atomic_read(compound_mapcount_ptr(head)) + 1;
}

/*
 * If a 16GB hugetlb page were mapped by PTEs of all of its 4kB sub-pages,
 * its subpages_mapcount would be 0x400000: choose the COMPOUND_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
 * leaves subpages_mapcount at 0, but avoid surprise if it participates later.
 */
#define COMPOUND_MAPPED	0x800000
#define SUBPAGES_MAPPED	(COMPOUND_MAPPED - 1)

/*
 * Number of sub-pages mapped by PTE; does not include the compound
 * mapcount.  Must be called only on the head of a compound page.
 */
static inline int head_subpages_mapcount(struct page *head)
{
	return atomic_read(subpages_mapcount_ptr(head)) & SUBPAGES_MAPPED;
}

/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

/*
 * Mapcount of a 0-order page; for a sub-page of a compound page, this
 * includes the compound_mapcount of the compound head.
 *
 * The result is undefined for pages which cannot be mapped into userspace,
 * for example SLAB pages or special types of pages: see page_has_type().
 * They use this place in struct page differently.
 */
static inline int page_mapcount(struct page *page)
{
	int mapcount = atomic_read(&page->_mapcount) + 1;

	if (likely(!PageCompound(page)))
		return mapcount;
	page = compound_head(page);
	return head_compound_mapcount(page) + mapcount;
}
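
/*
 * Worked example (illustrative, not part of the original header): since
 * _mapcount starts at -1, a small page mapped into two processes has
 * _mapcount == 1 and page_mapcount() returns 2; an unmapped page has
 * _mapcount == -1 and page_mapcount() returns 0.
 */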

int total_compound_mapcount(struct page *head);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
static inline int folio_mapcount(struct folio *folio)
{
	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;
	return total_compound_mapcount(&folio->page);
}

static inline int total_mapcount(struct page *page)
{
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;
	return total_compound_mapcount(compound_head(page));
}

static inline bool folio_large_is_mapped(struct folio *folio)
{
	/*
	 * Reading folio_mapcount_ptr() below could be omitted if hugetlb
	 * participated in incrementing subpages_mapcount when compound mapped.
	 */
	return atomic_read(folio_subpages_mapcount_ptr(folio)) > 0 ||
		atomic_read(folio_mapcount_ptr(folio)) >= 0;
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
static inline bool folio_mapped(struct folio *folio)
{
	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) >= 0;
	return folio_large_is_mapped(folio);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, this returns true if any sub-page is mapped, even
 * if this particular sub-page is not itself mapped by any PTE or PMD.
 */
static inline bool page_mapped(struct page *page)
{
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	return folio_large_is_mapped(page_folio(page));
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

static inline struct folio *virt_to_folio(const void *x)
{
	struct page *page = virt_to_page(x);

	return page_folio(page);
}

void __folio_put(struct folio *folio);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);

unsigned long nr_free_buffer_pages(void);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline void folio_set_compound_dtor(struct folio *folio,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
	folio->_folio_dtor = compound_dtor;
}

void destroy_large_folio(struct folio *folio);

static inline int head_compound_pincount(struct page *head)
{
	return atomic_read(compound_pincount_ptr(head));
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
#ifdef CONFIG_64BIT
	page[1].compound_nr = 1U << order;
#endif
}

/*
 * folio_set_compound_order is generally passed a non-zero order to
 * initialize a large folio.  However, hugetlb code abuses this by
 * passing in zero when 'dissolving' a large folio.
 */
static inline void folio_set_compound_order(struct folio *folio,
		unsigned int order)
{
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	folio->_folio_order = order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = order ? 1U << order : 0;
#endif
}

/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
	if (!PageHead(page))
		return 1;
#ifdef CONFIG_64BIT
	return page[1].compound_nr;
#else
	return 1UL << compound_order(page);
#endif
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}
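
/*
 * Worked example (illustrative, not part of the original header),
 * assuming a 4 KiB PAGE_SIZE: for an order-9 compound page,
 * compound_nr() == 512, page_size() == 2 MiB and page_shift() == 21;
 * for a plain order-0 page they are 1, 4096 and 12 respectively.
 */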

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return compound_order(page);
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return compound_nr(page);
}

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we do always
 * want pte_mkwrite.  But get_user_pages can cause write faults for
 * mappings that do not have writing enabled, when used by
 * access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);

bool __put_devmap_managed_page_refs(struct page *page, int refs);
static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	return __put_devmap_managed_page_refs(page, refs);
}
#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
	return false;
}
#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */

static inline bool put_devmap_managed_page(struct page *page)
{
	return put_devmap_managed_page_refs(page, 1);
}

/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
	((unsigned int) folio_ref_count(folio) + 127u <= 127u)

/**
 * folio_get - Increment the reference count on a folio.
 * @folio: The folio.
 *
 * Context: May be called in any context, as long as you know that
 * you have a refcount on the folio.  If you do not already have one,
 * folio_try_get() may be the right interface for you to use.
 */
static inline void folio_get(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
	folio_ref_inc(folio);
}

static inline void get_page(struct page *page)
{
	folio_get(page_folio(page));
}

int __must_check try_grab_page(struct page *page, unsigned int flags);

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

/**
 * folio_put - Decrement the reference count on a folio.
 * @folio: The folio.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put() unless you can be sure that it wasn't the
 * last reference.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put(struct folio *folio)
{
	if (folio_put_testzero(folio))
		__folio_put(folio);
}

/**
 * folio_put_refs - Reduce the reference count on a folio.
 * @folio: The folio.
 * @refs: The amount to subtract from the folio's reference count.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put_refs() unless you can be sure that these weren't
 * the last references.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put_refs(struct folio *folio, int refs)
{
	if (folio_ref_sub_and_test(folio, refs))
		__folio_put(folio);
}

/*
 * union release_pages_arg - an array of pages or folios
 *
 * release_pages() releases a simple array of multiple pages, and
 * accepts various different forms of said page array: either
 * a regular old boring array of pages, an array of folios, or
 * an array of encoded page pointers.
 *
 * The transparent union syntax for this kind of "any of these
 * argument types" is all kinds of ugly, so look away.
 */
typedef union {
	struct page **pages;
	struct folio **folios;
	struct encoded_page **encoded_pages;
} release_pages_arg __attribute__ ((__transparent_union__));

void release_pages(release_pages_arg, int nr);

/**
 * folios_put - Decrement the reference count on an array of folios.
 * @folios: The folios.
 * @nr: How many folios there are.
 *
 * Like folio_put(), but for an array of folios.  This is more efficient
 * than writing the loop yourself as it will optimise the locks which
 * need to be taken if the folios are freed.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folios_put(struct folio **folios, unsigned int nr)
{
	release_pages(folios, nr);
}

static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	/*
	 * For some devmap managed pages we need to catch refcount transition
	 * from 2 to 1:
	 */
	if (put_devmap_managed_page(&folio->page))
		return;
	folio_put(folio);
}

/*
 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
 * the page's refcount so that two separate items are tracked: the original page
 * reference count, and also a new count of how many pin_user_pages() calls were
 * made against the page. ("gup-pinned" is another term for the latter).
 *
 * With this scheme, pin_user_pages() becomes special: such pages are marked as
 * distinct from normal pages. As such, the unpin_user_page() call (and its
 * variants) must be used in order to release gup-pinned pages.
 *
 * Choice of value:
 *
 * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
 * counts with respect to pin_user_pages() and unpin_user_page() becomes
 * simpler, due to the fact that adding an even power of two to the page
 * refcount has the effect of using only the upper N bits, for the code that
 * counts up using the bias value. This means that the lower bits are left for
 * the exclusive use of the original code that increments and decrements by one
 * (or at least, by much smaller values than the bias value).
 *
 * Of course, once the lower bits overflow into the upper bits (and this is
 * OK, because subtraction recovers the original values), then visual inspection
 * no longer suffices to directly view the separate counts. However, for normal
 * applications that don't have huge page reference counts, this won't be an
 * issue.
 *
 * Locking: the lockless algorithm described in folio_try_get_rcu()
 * provides safe operation for get_user_pages(), page_mkclean() and
 * other calls that race to set up page table entries.
 */
#define GUP_PIN_COUNTING_BIAS (1U << 10)

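/*
 * Worked example (illustrative, not part of the original header): a page
 * with one normal reference has refcount 1; pinning it once via
 * pin_user_pages() adds GUP_PIN_COUNTING_BIAS, giving 1 + 1024 = 1025.
 * The upper bits (value / 1024) approximate the pin count, while the
 * lower bits still hold the ordinary references.
 */
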
void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
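
/*
 * Worked example (illustrative, not part of the original header): a
 * MAP_PRIVATE mapping of a file opened O_RDWR has VM_MAYWRITE set but
 * VM_SHARED clear, so is_cow_mapping() returns true; a MAP_SHARED
 * mapping has VM_SHARED set, so it returns false.
 */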

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline int folio_nid(const struct folio *folio)
{
	return page_to_nid(&folio->page);
}

#ifdef CONFIG_NUMA_BALANCING
/* page access time bits need to hold at least 4 seconds */
#define PAGE_ACCESS_TIME_MIN_BITS	12
#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
#define PAGE_ACCESS_TIME_BUCKETS				\
	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
#else
#define PAGE_ACCESS_TIME_BUCKETS	0
#endif

#define PAGE_ACCESS_TIME_MASK				\
	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)

static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */

static inline int xchg_page_access_time(struct page *page, int time)
{
	int last_time;

	last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
	return last_time << PAGE_ACCESS_TIME_BUCKETS;
}
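
/*
 * Worked example (illustrative, not part of the original header): with
 * PAGE_ACCESS_TIME_BUCKETS == 2, an access time of 1000 is stored as
 * 1000 >> 2 == 250, and the previously stored value is scaled back up
 * by << 2 on return, so the low 2 bits of the time are deliberately
 * traded away for cpupid storage space.
 */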
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int xchg_page_access_time(struct page *page, int time)
{
	return 0;
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/*
 * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
 * set the tags of all pages to the native kernel tag value 0xff, as the
 * default value 0x00 maps to 0xff.
 */

static inline u8 page_kasan_tag(const struct page *page)
{
	u8 tag = 0xff;

	if (kasan_enabled()) {
		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
		tag ^= 0xff;
	}

	return tag;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	unsigned long old_flags, flags;

	if (!kasan_enabled())
		return;

	tag ^= 0xff;
	old_flags = READ_ONCE(page->flags);
	do {
		flags = old_flags;
		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
}

static inline void page_kasan_tag_reset(struct page *page)
{
	if (kasan_enabled())
		page_kasan_tag_set(page, 0xff);
}
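
/*
 * Worked example (illustrative, not part of the original header): storing
 * the native tag 0xff writes 0xff ^ 0xff == 0x00 into the tag bits of
 * page->flags, which is exactly the default value those bits start out
 * with; reading it back yields 0x00 ^ 0xff == 0xff, so untagged pages
 * report the native kernel tag without any explicit initialization.
 */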

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

static inline struct zone *folio_zone(const struct folio *folio)
{
	return page_zone(&folio->page);
}

static inline pg_data_t *folio_pgdat(const struct folio *folio)
{
	return page_pgdat(&folio->page);
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

/**
 * folio_pfn - Return the Page Frame Number of a folio.
 * @folio: The folio.
 *
 * A folio may contain multiple pages.  The pages have consecutive
 * Page Frame Numbers.
 *
 * Return: The Page Frame Number of the first page in the folio.
 */
static inline unsigned long folio_pfn(struct folio *folio)
{
	return page_to_pfn(&folio->page);
}

static inline struct folio *pfn_folio(unsigned long pfn)
{
	return page_folio(pfn_to_page(pfn));
}

static inline atomic_t *folio_pincount_ptr(struct folio *folio)
{
	return &folio_page(folio, 1)->compound_pincount;
}

/**
 * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
 * @folio: The folio.
 *
 * This function checks if a folio has been pinned via a call to
 * a function in the pin_user_pages() family.
 *
 * For small folios, the return value is partially fuzzy: false is not fuzzy,
 * because it means "definitely not pinned for DMA", but true means "probably
 * pinned for DMA, but possibly a false positive due to having at least
 * GUP_PIN_COUNTING_BIAS worth of normal folio references".
 *
 * False positives are OK, because: a) it's unlikely for a folio to
 * get that many refcounts, and b) all the callers of this routine are
 * expected to be able to deal gracefully with a false positive.
 *
 * For large folios, the result will be exactly correct. That's because
 * we have more tracking data available: the compound_pincount is used
 * instead of the GUP_PIN_COUNTING_BIAS scheme.
 *
 * For more information, please see Documentation/core-api/pin_user_pages.rst.
 *
 * Return: True, if it is likely that the page has been "dma-pinned".
 * False, if the page is definitely not dma-pinned.
 */
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
	if (folio_test_large(folio))
		return atomic_read(folio_pincount_ptr(folio)) > 0;

	/*
	 * folio_ref_count() is signed. If that refcount overflows, then
	 * folio_ref_count() returns a negative value, and callers will avoid
	 * further incrementing the refcount.
	 *
	 * Here, for that overflow case, use the sign bit to count a little
	 * bit higher via unsigned math, and thus still get an accurate result.
	 */
	return ((unsigned int)folio_ref_count(folio)) >=
		GUP_PIN_COUNTING_BIAS;
}

static inline bool page_maybe_dma_pinned(struct page *page)
{
	return folio_maybe_dma_pinned(page_folio(page));
}

1699  * This should most likely only be called during fork() to see whether we
1700  * should break the cow immediately for an anon page on the src mm.
1701  *
1702  * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
1703  */
1704 static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
1705 					  struct page *page)
1706 {
1707 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
1708 
1709 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1710 		return false;
1711 
1712 	return page_maybe_dma_pinned(page);
1713 }
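
/*
 * Example (editorial sketch, hypothetical helper): how a fork()-time copy
 * decision might use the check above.  The real callers sit on the fork()
 * path (via page_try_dup_anon_rmap()), which holds the PT lock and has
 * entered the write_protect_seq critical section before asking whether a
 * concurrent GUP pin forces an immediate copy.
 */
static inline bool example_fork_must_copy_now(struct vm_area_struct *vma,
					      struct page *page)
{
	/* Assumes PT lock and vma->vm_mm->write_protect_seq are held. */
	return page_needs_cow_for_dma(vma, page);
}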
1714 
1715 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
1716 #ifdef CONFIG_MIGRATION
1717 static inline bool is_longterm_pinnable_page(struct page *page)
1718 {
1719 #ifdef CONFIG_CMA
1720 	int mt = get_pageblock_migratetype(page);
1721 
1722 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
1723 		return false;
1724 #endif
1725 	/* The zero page may always be pinned */
1726 	if (is_zero_pfn(page_to_pfn(page)))
1727 		return true;
1728 
1729 	/* Coherent device memory must always allow eviction. */
1730 	if (is_device_coherent_page(page))
1731 		return false;
1732 
1733 	/* Otherwise, non-movable zone pages can be pinned. */
1734 	return !is_zone_movable_page(page);
1735 }
1736 #else
1737 static inline bool is_longterm_pinnable_page(struct page *page)
1738 {
1739 	return true;
1740 }
1741 #endif
1742 
1743 static inline bool folio_is_longterm_pinnable(struct folio *folio)
1744 {
1745 	return is_longterm_pinnable_page(&folio->page);
1746 }
1747 
1748 static inline void set_page_zone(struct page *page, enum zone_type zone)
1749 {
1750 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1751 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1752 }
1753 
1754 static inline void set_page_node(struct page *page, unsigned long node)
1755 {
1756 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1757 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1758 }
1759 
1760 static inline void set_page_links(struct page *page, enum zone_type zone,
1761 	unsigned long node, unsigned long pfn)
1762 {
1763 	set_page_zone(page, zone);
1764 	set_page_node(page, node);
1765 #ifdef SECTION_IN_PAGE_FLAGS
1766 	set_page_section(page, pfn_to_section_nr(pfn));
1767 #endif
1768 }
1769 
1770 /**
1771  * folio_nr_pages - The number of pages in the folio.
1772  * @folio: The folio.
1773  *
1774  * Return: A positive power of two.
1775  */
1776 static inline long folio_nr_pages(struct folio *folio)
1777 {
1778 	if (!folio_test_large(folio))
1779 		return 1;
1780 #ifdef CONFIG_64BIT
1781 	return folio->_folio_nr_pages;
1782 #else
1783 	return 1L << folio->_folio_order;
1784 #endif
1785 }
1786 
1787 /**
1788  * folio_next - Move to the next physical folio.
1789  * @folio: The folio we're currently operating on.
1790  *
1791  * If you have physically contiguous memory which may span more than
1792  * one folio (eg a &struct bio_vec), use this function to move from one
1793  * folio to the next.  Do not use it if the memory is only virtually
1794  * contiguous as the folios are almost certainly not adjacent to each
1795  * other.  This is the folio equivalent to writing ``page++``.
1796  *
1797  * Context: We assume that the folios are refcounted and/or locked at a
1798  * higher level and do not adjust the reference counts.
1799  * Return: The next struct folio.
1800  */
1801 static inline struct folio *folio_next(struct folio *folio)
1802 {
1803 	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
1804 }
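
/*
 * Example (editorial sketch, hypothetical helper): walking a physically
 * contiguous run of pages one folio at a time, as the comment above
 * suggests for e.g. a &struct bio_vec.  Assumes @first starts a folio.
 */
static inline void example_walk_folios(struct page *first, long nr_pages)
{
	struct folio *folio = page_folio(first);

	while (nr_pages > 0) {
		/* ... operate on this folio ... */
		nr_pages -= folio_nr_pages(folio);
		folio = folio_next(folio);
	}
}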
1805 
1806 /**
1807  * folio_shift - The size of the memory described by this folio.
1808  * @folio: The folio.
1809  *
1810  * A folio represents a number of bytes which is a power-of-two in size.
1811  * This function tells you which power-of-two the folio is.  See also
1812  * folio_size() and folio_order().
1813  *
1814  * Context: The caller should have a reference on the folio to prevent
1815  * it from being split.  It is not necessary for the folio to be locked.
1816  * Return: The base-2 logarithm of the size of this folio.
1817  */
1818 static inline unsigned int folio_shift(struct folio *folio)
1819 {
1820 	return PAGE_SHIFT + folio_order(folio);
1821 }
1822 
1823 /**
1824  * folio_size - The number of bytes in a folio.
1825  * @folio: The folio.
1826  *
1827  * Context: The caller should have a reference on the folio to prevent
1828  * it from being split.  It is not necessary for the folio to be locked.
1829  * Return: The number of bytes in this folio.
1830  */
1831 static inline size_t folio_size(struct folio *folio)
1832 {
1833 	return PAGE_SIZE << folio_order(folio);
1834 }
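
/*
 * Example (editorial sketch): folio_order(), folio_shift() and folio_size()
 * are consistent by construction:
 * folio_size() == 1UL << folio_shift() == PAGE_SIZE << folio_order().
 * An order-2 folio with 4KiB pages thus has shift 14 and size 16384.
 */
static inline size_t example_folio_size_identity(struct folio *folio)
{
	size_t bytes = folio_size(folio);

	VM_WARN_ON((size_t)1 << folio_shift(folio) != bytes);
	VM_WARN_ON((size_t)folio_nr_pages(folio) * PAGE_SIZE != bytes);
	return bytes;
}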
1835 
1836 #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
1837 static inline int arch_make_page_accessible(struct page *page)
1838 {
1839 	return 0;
1840 }
1841 #endif
1842 
1843 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
1844 static inline int arch_make_folio_accessible(struct folio *folio)
1845 {
1846 	int ret;
1847 	long i, nr = folio_nr_pages(folio);
1848 
1849 	for (i = 0; i < nr; i++) {
1850 		ret = arch_make_page_accessible(folio_page(folio, i));
1851 		if (ret)
1852 			break;
1853 	}
1854 
1855 	return ret;
1856 }
1857 #endif
1858 
1859 /*
1860  * Some inline functions in vmstat.h depend on page_zone()
1861  */
1862 #include <linux/vmstat.h>
1863 
1864 static __always_inline void *lowmem_page_address(const struct page *page)
1865 {
1866 	return page_to_virt(page);
1867 }
1868 
1869 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1870 #define HASHED_PAGE_VIRTUAL
1871 #endif
1872 
1873 #if defined(WANT_PAGE_VIRTUAL)
1874 static inline void *page_address(const struct page *page)
1875 {
1876 	return page->virtual;
1877 }
1878 static inline void set_page_address(struct page *page, void *address)
1879 {
1880 	page->virtual = address;
1881 }
1882 #define page_address_init()  do { } while(0)
1883 #endif
1884 
1885 #if defined(HASHED_PAGE_VIRTUAL)
1886 void *page_address(const struct page *page);
1887 void set_page_address(struct page *page, void *virtual);
1888 void page_address_init(void);
1889 #endif
1890 
1891 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1892 #define page_address(page) lowmem_page_address(page)
1893 #define set_page_address(page, address)  do { } while(0)
1894 #define page_address_init()  do { } while(0)
1895 #endif
1896 
1897 static inline void *folio_address(const struct folio *folio)
1898 {
1899 	return page_address(&folio->page);
1900 }
1901 
1902 extern void *page_rmapping(struct page *page);
1903 extern pgoff_t __page_file_index(struct page *page);
1904 
1905 /*
1906  * Return the pagecache index of the passed page.  Regular pagecache pages
1907  * use ->index whereas swapcache pages use swp_offset(->private)
1908  */
1909 static inline pgoff_t page_index(struct page *page)
1910 {
1911 	if (unlikely(PageSwapCache(page)))
1912 		return __page_file_index(page);
1913 	return page->index;
1914 }
1915 
1916 /*
1917  * Return true only if the page has been allocated with
1918  * ALLOC_NO_WATERMARKS and the low watermark was not
1919  * met, implying that the system is under some pressure.
1920  */
1921 static inline bool page_is_pfmemalloc(const struct page *page)
1922 {
1923 	/*
1924 	 * lru.next has bit 1 set if the page is allocated from the
1925 	 * pfmemalloc reserves.  Callers may simply overwrite it if
1926 	 * they do not need to preserve that information.
1927 	 */
1928 	return (uintptr_t)page->lru.next & BIT(1);
1929 }
1930 
1931 /*
1932  * Only to be called by the page allocator on a freshly allocated
1933  * page.
1934  */
1935 static inline void set_page_pfmemalloc(struct page *page)
1936 {
1937 	page->lru.next = (void *)BIT(1);
1938 }
1939 
1940 static inline void clear_page_pfmemalloc(struct page *page)
1941 {
1942 	page->lru.next = NULL;
1943 }
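
/*
 * Example (editorial sketch, hypothetical helper): because the flag is
 * encoded in page->lru.next, it only survives until the page is linked
 * onto a list.  A consumer (e.g. a network receive path) should sample it
 * immediately after allocation:
 */
static inline bool example_snapshot_pfmemalloc(struct page *page)
{
	/* Capture before anything reuses page->lru. */
	return page_is_pfmemalloc(page);
}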
1944 
1945 /*
1946  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1947  */
1948 extern void pagefault_out_of_memory(void);
1949 
1950 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
1951 #define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
1952 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
1953 
1954 /*
1955  * Flags passed to show_mem() and show_free_areas() to suppress output in
1956  * various contexts.
1957  */
1958 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
1959 
1960 extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
1961 static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
1962 {
1963 	__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
1964 }
1965 
1966 /*
1967  * Parameter block passed down to zap_pte_range in exceptional cases.
1968  */
1969 struct zap_details {
1970 	struct folio *single_folio;	/* Locked folio to be unmapped */
1971 	bool even_cows;			/* Zap COWed private pages too? */
1972 	zap_flags_t zap_flags;		/* Extra flags for zapping */
1973 };
1974 
1975 /*
1976  * Whether to drop the pte markers, for example, the uffd-wp information for
1977  * file-backed memory.  This should only be specified when we will completely
1978  * drop the page in the mm, either by truncation or unmapping of the vma.  By
1979  * default, the flag is not set.
1980  */
1981 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
1982 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
1983 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
1984 
1985 #ifdef CONFIG_MMU
1986 extern bool can_do_mlock(void);
1987 #else
1988 static inline bool can_do_mlock(void) { return false; }
1989 #endif
1990 extern int user_shm_lock(size_t, struct ucounts *);
1991 extern void user_shm_unlock(size_t, struct ucounts *);
1992 
1993 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1994 			     pte_t pte);
1995 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1996 				pmd_t pmd);
1997 
1998 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1999 		  unsigned long size);
2000 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
2001 		    unsigned long size);
2002 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2003 			   unsigned long size, struct zap_details *details);
2004 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
2005 		struct vm_area_struct *start_vma, unsigned long start,
2006 		unsigned long end);
2007 
2008 struct mmu_notifier_range;
2009 
2010 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2011 		unsigned long end, unsigned long floor, unsigned long ceiling);
2012 int
2013 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2014 int follow_pte(struct mm_struct *mm, unsigned long address,
2015 	       pte_t **ptepp, spinlock_t **ptlp);
2016 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
2017 	unsigned long *pfn);
2018 int follow_phys(struct vm_area_struct *vma, unsigned long address,
2019 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
2020 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2021 			void *buf, int len, int write);
2022 
2023 extern void truncate_pagecache(struct inode *inode, loff_t new);
2024 extern void truncate_setsize(struct inode *inode, loff_t newsize);
2025 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2026 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2027 int generic_error_remove_page(struct address_space *mapping, struct page *page);
2028 
2029 #ifdef CONFIG_MMU
2030 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2031 				  unsigned long address, unsigned int flags,
2032 				  struct pt_regs *regs);
2033 extern int fixup_user_fault(struct mm_struct *mm,
2034 			    unsigned long address, unsigned int fault_flags,
2035 			    bool *unlocked);
2036 void unmap_mapping_pages(struct address_space *mapping,
2037 		pgoff_t start, pgoff_t nr, bool even_cows);
2038 void unmap_mapping_range(struct address_space *mapping,
2039 		loff_t const holebegin, loff_t const holelen, int even_cows);
2040 #else
2041 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2042 					 unsigned long address, unsigned int flags,
2043 					 struct pt_regs *regs)
2044 {
2045 	/* should never happen if there's no MMU */
2046 	BUG();
2047 	return VM_FAULT_SIGBUS;
2048 }
2049 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2050 		unsigned int fault_flags, bool *unlocked)
2051 {
2052 	/* should never happen if there's no MMU */
2053 	BUG();
2054 	return -EFAULT;
2055 }
2056 static inline void unmap_mapping_pages(struct address_space *mapping,
2057 		pgoff_t start, pgoff_t nr, bool even_cows) { }
2058 static inline void unmap_mapping_range(struct address_space *mapping,
2059 		loff_t const holebegin, loff_t const holelen, int even_cows) { }
2060 #endif
2061 
2062 static inline void unmap_shared_mapping_range(struct address_space *mapping,
2063 		loff_t const holebegin, loff_t const holelen)
2064 {
2065 	unmap_mapping_range(mapping, holebegin, holelen, 0);
2066 }
2067 
2068 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2069 		void *buf, int len, unsigned int gup_flags);
2070 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2071 		void *buf, int len, unsigned int gup_flags);
2072 extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
2073 			      void *buf, int len, unsigned int gup_flags);
2074 
2075 long get_user_pages_remote(struct mm_struct *mm,
2076 			    unsigned long start, unsigned long nr_pages,
2077 			    unsigned int gup_flags, struct page **pages,
2078 			    struct vm_area_struct **vmas, int *locked);
2079 long pin_user_pages_remote(struct mm_struct *mm,
2080 			   unsigned long start, unsigned long nr_pages,
2081 			   unsigned int gup_flags, struct page **pages,
2082 			   struct vm_area_struct **vmas, int *locked);
2083 long get_user_pages(unsigned long start, unsigned long nr_pages,
2084 			    unsigned int gup_flags, struct page **pages,
2085 			    struct vm_area_struct **vmas);
2086 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2087 		    unsigned int gup_flags, struct page **pages,
2088 		    struct vm_area_struct **vmas);
2089 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2090 		    struct page **pages, unsigned int gup_flags);
2091 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2092 		    struct page **pages, unsigned int gup_flags);
2093 
2094 int get_user_pages_fast(unsigned long start, int nr_pages,
2095 			unsigned int gup_flags, struct page **pages);
2096 int pin_user_pages_fast(unsigned long start, int nr_pages,
2097 			unsigned int gup_flags, struct page **pages);
2098 
2099 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2100 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2101 			struct task_struct *task, bool bypass_rlim);
2102 
2103 struct kvec;
2104 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
2105 			struct page **pages);
2106 struct page *get_dump_page(unsigned long addr);
2107 
2108 bool folio_mark_dirty(struct folio *folio);
2109 bool set_page_dirty(struct page *page);
2110 int set_page_dirty_lock(struct page *page);
2111 
2112 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2113 
2114 extern unsigned long move_page_tables(struct vm_area_struct *vma,
2115 		unsigned long old_addr, struct vm_area_struct *new_vma,
2116 		unsigned long new_addr, unsigned long len,
2117 		bool need_rmap_locks);
2118 
2119 /*
2120  * Flags used by change_protection().  For now we make it a bitmap so
2121  * that we can pass in multiple flags just like parameters.  However,
2122  * for now all the callers use only one of the flags at a
2123  * time.
2124  */
2125 /*
2126  * Whether we should manually check if we can map individual PTEs writable,
2127  * because something (e.g., COW, uffd-wp) blocks that from happening for all
2128  * PTEs automatically in a writable mapping.
2129  */
2130 #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
2131 /* Whether this protection change is for NUMA hints */
2132 #define  MM_CP_PROT_NUMA                   (1UL << 1)
2133 /* Whether this change is for write protecting */
2134 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
2135 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
2136 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
2137 					    MM_CP_UFFD_WP_RESOLVE)
2138 
2139 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2140 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
2141 {
2142 	/*
2143 	 * We want to check manually if we can change individual PTEs writable
2144 	 * if we can't do that automatically for all PTEs in a mapping. For
2145 	 * private mappings, that's always the case when we have write
2146 	 * permissions as we properly have to handle COW.
2147 	 */
2148 	if (vma->vm_flags & VM_SHARED)
2149 		return vma_wants_writenotify(vma, vma->vm_page_prot);
2150 	return !!(vma->vm_flags & VM_WRITE);
2151 
2152 }
2153 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2154 			     pte_t pte);
2155 extern unsigned long change_protection(struct mmu_gather *tlb,
2156 			      struct vm_area_struct *vma, unsigned long start,
2157 			      unsigned long end, pgprot_t newprot,
2158 			      unsigned long cp_flags);
2159 extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
2160 			  struct vm_area_struct **pprev, unsigned long start,
2161 			  unsigned long end, unsigned long newflags);
2162 
2163 /*
2164  * These variants don't attempt to fault in pages and may return short.
2165  */
2166 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2167 			     unsigned int gup_flags, struct page **pages);
2168 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2169 			     unsigned int gup_flags, struct page **pages);
2170 
2171 static inline bool get_user_page_fast_only(unsigned long addr,
2172 			unsigned int gup_flags, struct page **pagep)
2173 {
2174 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2175 }
2176 /*
2177  * per-process (per-mm_struct) statistics.
2178  */
2179 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2180 {
2181 	return percpu_counter_read_positive(&mm->rss_stat[member]);
2182 }
2183 
2184 void mm_trace_rss_stat(struct mm_struct *mm, int member);
2185 
2186 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2187 {
2188 	percpu_counter_add(&mm->rss_stat[member], value);
2189 
2190 	mm_trace_rss_stat(mm, member);
2191 }
2192 
2193 static inline void inc_mm_counter(struct mm_struct *mm, int member)
2194 {
2195 	percpu_counter_inc(&mm->rss_stat[member]);
2196 
2197 	mm_trace_rss_stat(mm, member);
2198 }
2199 
2200 static inline void dec_mm_counter(struct mm_struct *mm, int member)
2201 {
2202 	percpu_counter_dec(&mm->rss_stat[member]);
2203 
2204 	mm_trace_rss_stat(mm, member);
2205 }
2206 
2207 /* Optimized variant when page is already known not to be PageAnon */
2208 static inline int mm_counter_file(struct page *page)
2209 {
2210 	if (PageSwapBacked(page))
2211 		return MM_SHMEMPAGES;
2212 	return MM_FILEPAGES;
2213 }
2214 
2215 static inline int mm_counter(struct page *page)
2216 {
2217 	if (PageAnon(page))
2218 		return MM_ANONPAGES;
2219 	return mm_counter_file(page);
2220 }
2221 
2222 static inline unsigned long get_mm_rss(struct mm_struct *mm)
2223 {
2224 	return get_mm_counter(mm, MM_FILEPAGES) +
2225 		get_mm_counter(mm, MM_ANONPAGES) +
2226 		get_mm_counter(mm, MM_SHMEMPAGES);
2227 }
2228 
2229 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2230 {
2231 	return max(mm->hiwater_rss, get_mm_rss(mm));
2232 }
2233 
2234 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2235 {
2236 	return max(mm->hiwater_vm, mm->total_vm);
2237 }
2238 
2239 static inline void update_hiwater_rss(struct mm_struct *mm)
2240 {
2241 	unsigned long _rss = get_mm_rss(mm);
2242 
2243 	if ((mm)->hiwater_rss < _rss)
2244 		(mm)->hiwater_rss = _rss;
2245 }
2246 
2247 static inline void update_hiwater_vm(struct mm_struct *mm)
2248 {
2249 	if (mm->hiwater_vm < mm->total_vm)
2250 		mm->hiwater_vm = mm->total_vm;
2251 }
2252 
2253 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2254 {
2255 	mm->hiwater_rss = get_mm_rss(mm);
2256 }
2257 
2258 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2259 					 struct mm_struct *mm)
2260 {
2261 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2262 
2263 	if (*maxrss < hiwater_rss)
2264 		*maxrss = hiwater_rss;
2265 }
2266 
2267 #if defined(SPLIT_RSS_COUNTING)
2268 void sync_mm_rss(struct mm_struct *mm);
2269 #else
2270 static inline void sync_mm_rss(struct mm_struct *mm)
2271 {
2272 }
2273 #endif
2274 
2275 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2276 static inline int pte_special(pte_t pte)
2277 {
2278 	return 0;
2279 }
2280 
2281 static inline pte_t pte_mkspecial(pte_t pte)
2282 {
2283 	return pte;
2284 }
2285 #endif
2286 
2287 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2288 static inline int pte_devmap(pte_t pte)
2289 {
2290 	return 0;
2291 }
2292 #endif
2293 
2294 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2295 			       spinlock_t **ptl);
2296 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2297 				    spinlock_t **ptl)
2298 {
2299 	pte_t *ptep;
2300 	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2301 	return ptep;
2302 }
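
/*
 * Example (editorial sketch, hypothetical helper): the usual pattern --
 * walk (allocating if needed) down to the PTE, inspect it under the
 * returned lock, then drop the lock and the mapping.  See also
 * pte_unmap_unlock() further down, which pairs the two release steps.
 */
static inline bool example_pte_is_none(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep = get_locked_pte(mm, addr, &ptl);
	bool ret;

	if (!ptep)
		return false;	/* page table allocation failed */
	ret = pte_none(*ptep);
	spin_unlock(ptl);
	pte_unmap(ptep);
	return ret;
}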
2303 
2304 #ifdef __PAGETABLE_P4D_FOLDED
2305 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2306 						unsigned long address)
2307 {
2308 	return 0;
2309 }
2310 #else
2311 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2312 #endif
2313 
2314 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2315 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2316 						unsigned long address)
2317 {
2318 	return 0;
2319 }
2320 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2321 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2322 
2323 #else
2324 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2325 
2326 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2327 {
2328 	if (mm_pud_folded(mm))
2329 		return;
2330 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2331 }
2332 
2333 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2334 {
2335 	if (mm_pud_folded(mm))
2336 		return;
2337 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2338 }
2339 #endif
2340 
2341 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2342 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2343 						unsigned long address)
2344 {
2345 	return 0;
2346 }
2347 
2348 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2349 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2350 
2351 #else
2352 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2353 
2354 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2355 {
2356 	if (mm_pmd_folded(mm))
2357 		return;
2358 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2359 }
2360 
2361 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2362 {
2363 	if (mm_pmd_folded(mm))
2364 		return;
2365 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2366 }
2367 #endif
2368 
2369 #ifdef CONFIG_MMU
2370 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2371 {
2372 	atomic_long_set(&mm->pgtables_bytes, 0);
2373 }
2374 
2375 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2376 {
2377 	return atomic_long_read(&mm->pgtables_bytes);
2378 }
2379 
2380 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2381 {
2382 	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2383 }
2384 
2385 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2386 {
2387 	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2388 }
2389 #else
2390 
2391 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2392 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2393 {
2394 	return 0;
2395 }
2396 
2397 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2398 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2399 #endif
2400 
2401 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2402 int __pte_alloc_kernel(pmd_t *pmd);
2403 
2404 #if defined(CONFIG_MMU)
2405 
2406 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2407 		unsigned long address)
2408 {
2409 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2410 		NULL : p4d_offset(pgd, address);
2411 }
2412 
2413 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2414 		unsigned long address)
2415 {
2416 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2417 		NULL : pud_offset(p4d, address);
2418 }
2419 
2420 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2421 {
2422 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
2423 		NULL : pmd_offset(pud, address);
2424 }
2425 #endif /* CONFIG_MMU */
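
#ifdef CONFIG_MMU
/*
 * Example (editorial sketch, hypothetical helper): the canonical top-down
 * walk used by fault and population paths -- each level either finds or
 * allocates the next table and returns NULL on allocation failure.
 */
static inline pmd_t *example_walk_to_pmd(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
	pud_t *pud;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);
}
#endif /* CONFIG_MMU */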
2426 
2427 #if USE_SPLIT_PTE_PTLOCKS
2428 #if ALLOC_SPLIT_PTLOCKS
2429 void __init ptlock_cache_init(void);
2430 extern bool ptlock_alloc(struct page *page);
2431 extern void ptlock_free(struct page *page);
2432 
2433 static inline spinlock_t *ptlock_ptr(struct page *page)
2434 {
2435 	return page->ptl;
2436 }
2437 #else /* ALLOC_SPLIT_PTLOCKS */
2438 static inline void ptlock_cache_init(void)
2439 {
2440 }
2441 
2442 static inline bool ptlock_alloc(struct page *page)
2443 {
2444 	return true;
2445 }
2446 
2447 static inline void ptlock_free(struct page *page)
2448 {
2449 }
2450 
2451 static inline spinlock_t *ptlock_ptr(struct page *page)
2452 {
2453 	return &page->ptl;
2454 }
2455 #endif /* ALLOC_SPLIT_PTLOCKS */
2456 
2457 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2458 {
2459 	return ptlock_ptr(pmd_page(*pmd));
2460 }
2461 
2462 static inline bool ptlock_init(struct page *page)
2463 {
2464 	/*
2465 	 * prep_new_page() initializes page->private (and therefore page->ptl)
2466 	 * with 0. Make sure nobody took it into use in the meantime.
2467 	 *
2468 	 * It can happen if an arch tries to use slab for page table allocation:
2469 	 * slab code uses page->slab_cache, which shares storage with page->ptl.
2470 	 */
2471 	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
2472 	if (!ptlock_alloc(page))
2473 		return false;
2474 	spin_lock_init(ptlock_ptr(page));
2475 	return true;
2476 }
2477 
2478 #else	/* !USE_SPLIT_PTE_PTLOCKS */
2479 /*
2480  * We use mm->page_table_lock to guard all pagetable pages of the mm.
2481  */
2482 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2483 {
2484 	return &mm->page_table_lock;
2485 }
2486 static inline void ptlock_cache_init(void) {}
2487 static inline bool ptlock_init(struct page *page) { return true; }
2488 static inline void ptlock_free(struct page *page) {}
2489 #endif /* USE_SPLIT_PTE_PTLOCKS */
2490 
2491 static inline void pgtable_init(void)
2492 {
2493 	ptlock_cache_init();
2494 	pgtable_cache_init();
2495 }
2496 
2497 static inline bool pgtable_pte_page_ctor(struct page *page)
2498 {
2499 	if (!ptlock_init(page))
2500 		return false;
2501 	__SetPageTable(page);
2502 	inc_lruvec_page_state(page, NR_PAGETABLE);
2503 	return true;
2504 }
2505 
2506 static inline void pgtable_pte_page_dtor(struct page *page)
2507 {
2508 	ptlock_free(page);
2509 	__ClearPageTable(page);
2510 	dec_lruvec_page_state(page, NR_PAGETABLE);
2511 }
2512 
2513 #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
2514 ({							\
2515 	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
2516 	pte_t *__pte = pte_offset_map(pmd, address);	\
2517 	*(ptlp) = __ptl;				\
2518 	spin_lock(__ptl);				\
2519 	__pte;						\
2520 })
2521 
2522 #define pte_unmap_unlock(pte, ptl)	do {		\
2523 	spin_unlock(ptl);				\
2524 	pte_unmap(pte);					\
2525 } while (0)
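
/*
 * Example (editorial sketch, hypothetical helper): scanning the PTEs that
 * cover [addr, end) under the split PTE lock, showing the usual pairing of
 * pte_offset_map_lock() with pte_unmap_unlock().  Assumes the PMD is
 * present and addr/end lie within a single PMD's reach.
 */
static inline unsigned long example_count_present_ptes(struct mm_struct *mm,
		pmd_t *pmd, unsigned long addr, unsigned long end)
{
	unsigned long present = 0;
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	for (; addr < end; addr += PAGE_SIZE, pte++)
		if (pte_present(*pte))
			present++;
	pte_unmap_unlock(pte - 1, ptl);
	return present;
}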
2526 
2527 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2528 
2529 #define pte_alloc_map(mm, pmd, address)			\
2530 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2531 
2532 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
2533 	(pte_alloc(mm, pmd) ?			\
2534 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2535 
2536 #define pte_alloc_kernel(pmd, address)			\
2537 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ?	\
2538 		NULL : pte_offset_kernel(pmd, address))
2539 
2540 #if USE_SPLIT_PMD_PTLOCKS
2541 
2542 static inline struct page *pmd_pgtable_page(pmd_t *pmd)
2543 {
2544 	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2545 	return virt_to_page((void *)((unsigned long) pmd & mask));
2546 }
2547 
2548 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2549 {
2550 	return ptlock_ptr(pmd_pgtable_page(pmd));
2551 }
2552 
2553 static inline bool pmd_ptlock_init(struct page *page)
2554 {
2555 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2556 	page->pmd_huge_pte = NULL;
2557 #endif
2558 	return ptlock_init(page);
2559 }
2560 
2561 static inline void pmd_ptlock_free(struct page *page)
2562 {
2563 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2564 	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
2565 #endif
2566 	ptlock_free(page);
2567 }
2568 
2569 #define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte)
2570 
2571 #else
2572 
2573 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2574 {
2575 	return &mm->page_table_lock;
2576 }
2577 
2578 static inline bool pmd_ptlock_init(struct page *page) { return true; }
2579 static inline void pmd_ptlock_free(struct page *page) {}
2580 
2581 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2582 
2583 #endif
2584 
2585 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2586 {
2587 	spinlock_t *ptl = pmd_lockptr(mm, pmd);
2588 	spin_lock(ptl);
2589 	return ptl;
2590 }
2591 
2592 static inline bool pgtable_pmd_page_ctor(struct page *page)
2593 {
2594 	if (!pmd_ptlock_init(page))
2595 		return false;
2596 	__SetPageTable(page);
2597 	inc_lruvec_page_state(page, NR_PAGETABLE);
2598 	return true;
2599 }
2600 
2601 static inline void pgtable_pmd_page_dtor(struct page *page)
2602 {
2603 	pmd_ptlock_free(page);
2604 	__ClearPageTable(page);
2605 	dec_lruvec_page_state(page, NR_PAGETABLE);
2606 }
2607 
2608 /*
2609  * No scalability reason to split PUD locks yet, but follow the same pattern
2610  * as the PMD locks to make it easier if we decide to.  The VM should not be
2611  * considered ready to switch to split PUD locks yet; there may be places
2612  * which need to be converted from page_table_lock.
2613  */
2614 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2615 {
2616 	return &mm->page_table_lock;
2617 }
2618 
2619 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2620 {
2621 	spinlock_t *ptl = pud_lockptr(mm, pud);
2622 
2623 	spin_lock(ptl);
2624 	return ptl;
2625 }
2626 
2627 extern void __init pagecache_init(void);
2628 extern void free_initmem(void);
2629 
2630 /*
2631  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
2632  * into the buddy system. The freed pages will be poisoned with pattern
2633  * "poison" if it is within the range [0, UCHAR_MAX].
2634  * Returns the number of pages freed into the buddy system.
2635  */
2636 extern unsigned long free_reserved_area(void *start, void *end,
2637 					int poison, const char *s);
2638 
2639 extern void adjust_managed_page_count(struct page *page, long count);
2640 extern void mem_init_print_info(void);
2641 
2642 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2643 
2644 /* Free the reserved page into the buddy system, so it gets managed. */
2645 static inline void free_reserved_page(struct page *page)
2646 {
2647 	ClearPageReserved(page);
2648 	init_page_count(page);
2649 	__free_page(page);
2650 	adjust_managed_page_count(page, 1);
2651 }
2652 #define free_highmem_page(page) free_reserved_page(page)
2653 
2654 static inline void mark_page_reserved(struct page *page)
2655 {
2656 	SetPageReserved(page);
2657 	adjust_managed_page_count(page, -1);
2658 }
2659 
2660 /*
2661  * Default method to free all the __init memory into the buddy system.
2662  * The freed pages will be poisoned with pattern "poison" if it is within
2663  * the range [0, UCHAR_MAX].
2664  * Returns the number of pages freed into the buddy system.
2665  */
2666 static inline unsigned long free_initmem_default(int poison)
2667 {
2668 	extern char __init_begin[], __init_end[];
2669 
2670 	return free_reserved_area(&__init_begin, &__init_end,
2671 				  poison, "unused kernel image (initmem)");
2672 }
2673 
2674 static inline unsigned long get_num_physpages(void)
2675 {
2676 	int nid;
2677 	unsigned long phys_pages = 0;
2678 
2679 	for_each_online_node(nid)
2680 		phys_pages += node_present_pages(nid);
2681 
2682 	return phys_pages;
2683 }
2684 
2685 /*
2686  * Using memblock node mappings, an architecture may initialise its
2687  * zones, allocate the backing mem_map and account for memory holes in an
2688  * architecture independent manner.
2689  *
2690  * An architecture is expected to register the ranges of page frames backed
2691  * by physical memory with memblock_add[_node]() before calling
2692  * free_area_init(), passing in the PFN each zone ends at. In its most basic
2693  * usage, an architecture is expected to do something like
2694  *
2695  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
2696  * 							 max_highmem_pfn};
2697  * for_each_valid_physical_page_range()
2698  *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
2699  * free_area_init(max_zone_pfns);
2700  */
2701 void free_area_init(unsigned long *max_zone_pfn);
2702 unsigned long node_map_pfn_alignment(void);
2703 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2704 						unsigned long end_pfn);
2705 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2706 						unsigned long end_pfn);
2707 extern void get_pfn_range_for_nid(unsigned int nid,
2708 			unsigned long *start_pfn, unsigned long *end_pfn);
2709 
2710 #ifndef CONFIG_NUMA
2711 static inline int early_pfn_to_nid(unsigned long pfn)
2712 {
2713 	return 0;
2714 }
2715 #else
2716 /* please see mm/page_alloc.c */
2717 extern int __meminit early_pfn_to_nid(unsigned long pfn);
2718 #endif
2719 
2720 extern void set_dma_reserve(unsigned long new_dma_reserve);
2721 extern void memmap_init_range(unsigned long, int, unsigned long,
2722 		unsigned long, unsigned long, enum meminit_context,
2723 		struct vmem_altmap *, int migratetype);
2724 extern void setup_per_zone_wmarks(void);
2725 extern void calculate_min_free_kbytes(void);
2726 extern int __meminit init_per_zone_wmark_min(void);
2727 extern void mem_init(void);
2728 extern void __init mmap_init(void);
2729 
2730 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
2731 static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
2732 {
2733 	__show_mem(flags, nodemask, MAX_NR_ZONES - 1);
2734 }
2735 extern long si_mem_available(void);
2736 extern void si_meminfo(struct sysinfo * val);
2737 extern void si_meminfo_node(struct sysinfo *val, int nid);
2738 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2739 extern unsigned long arch_reserved_kernel_pages(void);
2740 #endif
2741 
2742 extern __printf(3, 4)
2743 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2744 
2745 extern void setup_per_cpu_pageset(void);
2746 
2747 /* page_alloc.c */
2748 extern int min_free_kbytes;
2749 extern int watermark_boost_factor;
2750 extern int watermark_scale_factor;
2751 extern bool arch_has_descending_max_zone_pfns(void);
2752 
2753 /* nommu.c */
2754 extern atomic_long_t mmap_pages_allocated;
2755 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2756 
2757 /* interval_tree.c */
2758 void vma_interval_tree_insert(struct vm_area_struct *node,
2759 			      struct rb_root_cached *root);
2760 void vma_interval_tree_insert_after(struct vm_area_struct *node,
2761 				    struct vm_area_struct *prev,
2762 				    struct rb_root_cached *root);
2763 void vma_interval_tree_remove(struct vm_area_struct *node,
2764 			      struct rb_root_cached *root);
2765 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2766 				unsigned long start, unsigned long last);
2767 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2768 				unsigned long start, unsigned long last);
2769 
2770 #define vma_interval_tree_foreach(vma, root, start, last)		\
2771 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
2772 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
2773 
2774 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2775 				   struct rb_root_cached *root);
2776 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2777 				   struct rb_root_cached *root);
2778 struct anon_vma_chain *
2779 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2780 				  unsigned long start, unsigned long last);
2781 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2782 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
2783 #ifdef CONFIG_DEBUG_VM_RB
2784 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2785 #endif
2786 
2787 #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
2788 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2789 	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2790 
2791 /* mmap.c */
2792 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2793 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2794 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2795 	struct vm_area_struct *expand);
2796 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2797 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2798 {
2799 	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2800 }
2801 extern struct vm_area_struct *vma_merge(struct mm_struct *,
2802 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2803 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2804 	struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
2805 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2806 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2807 	unsigned long addr, int new_below);
2808 extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2809 	unsigned long addr, int new_below);
2810 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2811 extern void unlink_file_vma(struct vm_area_struct *);
2812 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2813 	unsigned long addr, unsigned long len, pgoff_t pgoff,
2814 	bool *need_rmap_locks);
2815 extern void exit_mmap(struct mm_struct *);
2816 
2817 void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
2818 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
2819 
2820 static inline int check_data_rlimit(unsigned long rlim,
2821 				    unsigned long new,
2822 				    unsigned long start,
2823 				    unsigned long end_data,
2824 				    unsigned long start_data)
2825 {
2826 	if (rlim < RLIM_INFINITY) {
2827 		if (((new - start) + (end_data - start_data)) > rlim)
2828 			return -ENOSPC;
2829 	}
2830 
2831 	return 0;
2832 }
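
/*
 * Example (editorial sketch, hypothetical helper): how a brk()-style path
 * might validate a proposed new data segment end, mirroring the check in
 * sys_brk().  rlimit() comes from <linux/sched/signal.h>.
 */
static inline int example_validate_new_brk(struct mm_struct *mm,
					   unsigned long newbrk)
{
	return check_data_rlimit(rlimit(RLIMIT_DATA), newbrk,
				 mm->start_brk, mm->end_data, mm->start_data);
}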
2833 
2834 extern int mm_take_all_locks(struct mm_struct *mm);
2835 extern void mm_drop_all_locks(struct mm_struct *mm);
2836 
2837 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2838 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2839 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2840 extern struct file *get_task_exe_file(struct task_struct *task);
2841 
2842 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2843 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2844 
2845 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2846 				   const struct vm_special_mapping *sm);
2847 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2848 				   unsigned long addr, unsigned long len,
2849 				   unsigned long flags,
2850 				   const struct vm_special_mapping *spec);
2851 /* This is an obsolete alternative to _install_special_mapping. */
2852 extern int install_special_mapping(struct mm_struct *mm,
2853 				   unsigned long addr, unsigned long len,
2854 				   unsigned long flags, struct page **pages);
2855 
2856 unsigned long randomize_stack_top(unsigned long stack_top);
2857 unsigned long randomize_page(unsigned long start, unsigned long range);
2858 
2859 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2860 
2861 extern unsigned long mmap_region(struct file *file, unsigned long addr,
2862 	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2863 	struct list_head *uf);
2864 extern unsigned long do_mmap(struct file *file, unsigned long addr,
2865 	unsigned long len, unsigned long prot, unsigned long flags,
2866 	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
2867 extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
2868 			 unsigned long start, size_t len, struct list_head *uf,
2869 			 bool downgrade);
2870 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2871 		     struct list_head *uf);
2872 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
2873 
2874 #ifdef CONFIG_MMU
2875 extern int __mm_populate(unsigned long addr, unsigned long len,
2876 			 int ignore_errors);
2877 static inline void mm_populate(unsigned long addr, unsigned long len)
2878 {
2879 	/* Ignore errors */
2880 	(void) __mm_populate(addr, len, 1);
2881 }
2882 #else
2883 static inline void mm_populate(unsigned long addr, unsigned long len) {}
2884 #endif
2885 
2886 /* These take the mm semaphore themselves */
2887 extern int __must_check vm_brk(unsigned long, unsigned long);
2888 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2889 extern int vm_munmap(unsigned long, size_t);
2890 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2891         unsigned long, unsigned long,
2892         unsigned long, unsigned long);
2893 
2894 struct vm_unmapped_area_info {
2895 #define VM_UNMAPPED_AREA_TOPDOWN 1
2896 	unsigned long flags;
2897 	unsigned long length;
2898 	unsigned long low_limit;
2899 	unsigned long high_limit;
2900 	unsigned long align_mask;
2901 	unsigned long align_offset;
2902 };
2903 
2904 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
2905 
2906 /* truncate.c */
2907 extern void truncate_inode_pages(struct address_space *, loff_t);
2908 extern void truncate_inode_pages_range(struct address_space *,
2909 				       loff_t lstart, loff_t lend);
2910 extern void truncate_inode_pages_final(struct address_space *);
2911 
2912 /* generic vm_area_ops exported for stackable file systems */
2913 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
2914 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
2915 		pgoff_t start_pgoff, pgoff_t end_pgoff);
2916 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
2917 
2918 extern unsigned long stack_guard_gap;
2919 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2920 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2921 
2922 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
2923 extern int expand_downwards(struct vm_area_struct *vma,
2924 		unsigned long address);
2925 #if VM_GROWSUP
2926 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2927 #else
2928   #define expand_upwards(vma, address) (0)
2929 #endif
2930 
2931 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2932 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2933 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2934 					     struct vm_area_struct **pprev);
2935 
2936 /*
2937  * Look up the first VMA which intersects the interval [start_addr, end_addr);
2938  * NULL if none.  Assume start_addr < end_addr.
2939  */
2940 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
2941 			unsigned long start_addr, unsigned long end_addr);
2942 
2943 /**
2944  * vma_lookup() - Find a VMA at a specific address
2945  * @mm: The process address space.
2946  * @addr: The user address.
2947  *
2948  * Return: The vm_area_struct at the given address, %NULL otherwise.
2949  */
2950 static inline
2951 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
2952 {
2953 	return mtree_load(&mm->mm_mt, addr);
2954 }
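
/*
 * Example (editorial sketch, hypothetical helper): the lookup must be
 * performed under the mmap lock, and the result is only stable while the
 * lock is held.
 */
static inline bool example_addr_is_mapped(struct mm_struct *mm,
					  unsigned long addr)
{
	bool mapped;

	mmap_read_lock(mm);
	mapped = vma_lookup(mm, addr) != NULL;
	mmap_read_unlock(mm);
	return mapped;
}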
2955 
2956 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2957 {
2958 	unsigned long vm_start = vma->vm_start;
2959 
2960 	if (vma->vm_flags & VM_GROWSDOWN) {
2961 		vm_start -= stack_guard_gap;
2962 		if (vm_start > vma->vm_start)
2963 			vm_start = 0;
2964 	}
2965 	return vm_start;
2966 }
2967 
2968 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2969 {
2970 	unsigned long vm_end = vma->vm_end;
2971 
2972 	if (vma->vm_flags & VM_GROWSUP) {
2973 		vm_end += stack_guard_gap;
2974 		if (vm_end < vma->vm_end)
2975 			vm_end = -PAGE_SIZE;
2976 	}
2977 	return vm_end;
2978 }
2979 
2980 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2981 {
2982 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2983 }
2984 
2985 /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
2986 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2987 				unsigned long vm_start, unsigned long vm_end)
2988 {
2989 	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
2990 
2991 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2992 		vma = NULL;
2993 
2994 	return vma;
2995 }
2996 
2997 static inline bool range_in_vma(struct vm_area_struct *vma,
2998 				unsigned long start, unsigned long end)
2999 {
3000 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
3001 }
3002 
3003 #ifdef CONFIG_MMU
3004 pgprot_t vm_get_page_prot(unsigned long vm_flags);
3005 void vma_set_page_prot(struct vm_area_struct *vma);
3006 #else
3007 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
3008 {
3009 	return __pgprot(0);
3010 }
3011 static inline void vma_set_page_prot(struct vm_area_struct *vma)
3012 {
3013 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3014 }
3015 #endif
3016 
3017 void vma_set_file(struct vm_area_struct *vma, struct file *file);
3018 
3019 #ifdef CONFIG_NUMA_BALANCING
3020 unsigned long change_prot_numa(struct vm_area_struct *vma,
3021 			unsigned long start, unsigned long end);
3022 #endif
3023 
3024 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
3025 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
3026 			unsigned long pfn, unsigned long size, pgprot_t);
3027 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3028 		unsigned long pfn, unsigned long size, pgprot_t prot);
3029 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3030 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3031 			struct page **pages, unsigned long *num);
3032 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3033 				unsigned long num);
3034 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3035 				unsigned long num);
3036 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3037 			unsigned long pfn);
3038 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3039 			unsigned long pfn, pgprot_t pgprot);
3040 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3041 			pfn_t pfn);
3042 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
3043 			pfn_t pfn, pgprot_t pgprot);
3044 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3045 		unsigned long addr, pfn_t pfn);
3046 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3047 
3048 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3049 				unsigned long addr, struct page *page)
3050 {
3051 	int err = vm_insert_page(vma, addr, page);
3052 
3053 	if (err == -ENOMEM)
3054 		return VM_FAULT_OOM;
3055 	if (err < 0 && err != -EBUSY)
3056 		return VM_FAULT_SIGBUS;
3057 
3058 	return VM_FAULT_NOPAGE;
3059 }
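
/*
 * Example (editorial sketch, hypothetical types): a minimal driver ->fault
 * handler built on vmf_insert_page(), assuming the driver keeps one
 * refcounted page in vm_private_data.
 */
struct example_dev {
	struct page *page;
};

static inline vm_fault_t example_vm_fault(struct vm_fault *vmf)
{
	struct example_dev *dev = vmf->vma->vm_private_data;

	return vmf_insert_page(vmf->vma, vmf->address, dev->page);
}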
3060 
3061 #ifndef io_remap_pfn_range
3062 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3063 				     unsigned long addr, unsigned long pfn,
3064 				     unsigned long size, pgprot_t prot)
3065 {
3066 	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
3067 }
3068 #endif
3069 
3070 static inline vm_fault_t vmf_error(int err)
3071 {
3072 	if (err == -ENOMEM)
3073 		return VM_FAULT_OOM;
3074 	return VM_FAULT_SIGBUS;
3075 }
3076 
3077 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
3078 			 unsigned int foll_flags);
3079 
3080 #define FOLL_WRITE	0x01	/* check pte is writable */
3081 #define FOLL_TOUCH	0x02	/* mark page accessed */
3082 #define FOLL_GET	0x04	/* do get_page on page */
3083 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
3084 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
3085 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
3086 				 * and return without waiting upon it */
3087 #define FOLL_NOFAULT	0x80	/* do not fault in pages */
3088 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
3089 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
3090 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
3091 #define FOLL_ANON	0x8000	/* don't do file mappings */
3092 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
3093 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
3094 #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
3095 #define FOLL_FAST_ONLY	0x80000	/* gup_fast: prevent fall-back to slow gup */
3096 #define FOLL_PCI_P2PDMA	0x100000 /* allow returning PCI P2PDMA pages */
3097 #define FOLL_INTERRUPTIBLE  0x200000 /* allow interrupts from generic signals */
3098 
3099 /*
3100  * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
3101  * other. Here is what they mean, and how to use them:
3102  *
3103  * FOLL_LONGTERM indicates that the page will be held for an indefinite time
3104  * period _often_ under userspace control.  This is in contrast to
3105  * iov_iter_get_pages(), whose usages are transient.
3106  *
3107  * FIXME: For pages which are part of a filesystem, mappings are subject to the
3108  * lifetime enforced by the filesystem and we need guarantees that longterm
3109  * users like RDMA and V4L2 only establish mappings which coordinate usage with
3110  * the filesystem.  Ideas for this coordination include revoking the longterm
3111  * pin, delaying writeback, bounce buffer page writeback, etc.  As FS DAX was
3112  * added after the problem with filesystems was found, FS DAX VMAs are
3113  * specifically failed.  Filesystem pages are still subject to bugs and use of
3114  * FOLL_LONGTERM should be avoided on those pages.
3115  *
3116  * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
3117  * Currently only get_user_pages() and get_user_pages_fast() support this flag
3118  * and calls to get_user_pages_[un]locked are specifically not allowed.  This
3119  * is due to an incompatibility with the FS DAX check and
3120  * FAULT_FLAG_ALLOW_RETRY.
3121  *
3122  * In the CMA case: long term pins in a CMA region would unnecessarily fragment
3123  * that region.  And so, CMA attempts to migrate the page before pinning, when
3124  * FOLL_LONGTERM is specified.
3125  *
3126  * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
3127  * but an additional pin counting system) will be invoked. This is intended for
3128  * anything that gets a page reference and then touches page data (for example,
3129  * Direct IO). This lets the filesystem know that some non-file-system entity is
3130  * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
3131  * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
3132  * a call to unpin_user_page().
3133  *
3134  * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
3135  * and separate refcounting mechanisms, however, and that means that each has
3136  * its own acquire and release mechanisms:
3137  *
3138  *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
3139  *
3140  *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_page*() to release.
3141  *
3142  * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
3143  * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
3144  * calls applied to them, and that's perfectly OK. This is a constraint on the
3145  * callers, not on the pages.)
3146  *
3147  * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
3148  * directly by the caller. That's in order to help avoid mismatches when
3149  * releasing pages: get_user_pages*() pages must be released via put_page(),
3150  * while pin_user_pages*() pages must be released via unpin_user_page().
3151  *
3152  * Please see Documentation/core-api/pin_user_pages.rst for more information.
3153  */
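
/*
 * Illustrative sketch (not part of the original header): the Direct-IO-style
 * pattern described above. "user_addr" is a hypothetical local;
 * pin_user_pages_fast()/unpin_user_pages() are the FOLL_PIN acquire/release
 * pair, and FOLL_PIN itself is set internally by the API.
 *
 *	struct page *pages[16];
 *	int npinned;
 *
 *	npinned = pin_user_pages_fast(user_addr, 16, FOLL_WRITE, pages);
 *	if (npinned < 0)
 *		return npinned;
 *	// ... read/write the pinned page data (e.g. via DMA) ...
 *	unpin_user_pages(pages, npinned);	// never put_page() on these
 */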
3154 
3155 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
3156 {
3157 	if (vm_fault & VM_FAULT_OOM)
3158 		return -ENOMEM;
3159 	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3160 		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
3161 	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3162 		return -EFAULT;
3163 	return 0;
3164 }
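
/*
 * Illustrative sketch (assumption, not from the original header): how a
 * GUP-style caller might translate a fault result; "vma", "address",
 * "fault_flags" and "foll_flags" are hypothetical locals.
 *
 *	vm_fault_t ret = handle_mm_fault(vma, address, fault_flags, NULL);
 *
 *	if (ret & VM_FAULT_ERROR) {
 *		int err = vm_fault_to_errno(ret, foll_flags);
 *
 *		if (err)
 *			return err;
 *	}
 */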
3165 
3166 /*
3167  * Indicates whether, for a page that is write-protected in the page table,
3168  * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
3169  * GUP pin will remain consistent with the pages mapped into the page tables
3170  * of the MM.
3171  *
3172  * Temporary unmapping of PageAnonExclusive() pages or clearing of
3173  * PageAnonExclusive() has to protect against concurrent GUP:
3174  * * Ordinary GUP: Using the PT lock
3175  * * GUP-fast and fork(): mm->write_protect_seq
3176  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
3177  *    page_try_share_anon_rmap()
3178  *
3179  * Must be called with the (sub)page that's actually referenced via the
3180  * page table entry, which might not necessarily be the head page for a
3181  * PTE-mapped THP.
3182  *
3183  * If the vma is NULL, we're coming from the GUP-fast path and might have
3184  * to fall back to the slow path just to look up the vma.
3185  */
3186 static inline bool gup_must_unshare(struct vm_area_struct *vma,
3187 				    unsigned int flags, struct page *page)
3188 {
3189 	/*
3190 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
3191 	 * has to be writable -- and if it references (part of) an anonymous
3192 	 * folio, that part is required to be marked exclusive.
3193 	 */
3194 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
3195 		return false;
3196 	/*
3197 	 * Note: PageAnon(page) is stable until the page is actually getting
3198 	 * freed.
3199 	 */
3200 	if (!PageAnon(page)) {
3201 		/*
3202 		 * We only care about R/O long-term pinning: R/O short-term
3203 		 * pinning does not have the semantics to observe successive
3204 		 * changes through the process page tables.
3205 		 */
3206 		if (!(flags & FOLL_LONGTERM))
3207 			return false;
3208 
3209 		/* We really need the vma ... */
3210 		if (!vma)
3211 			return true;
3212 
3213 		/*
3214 		 * ... because we only care about writable private ("COW")
3215 		 * mappings where we have to break COW early.
3216 		 */
3217 		return is_cow_mapping(vma->vm_flags);
3218 	}
3219 
3220 	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
3221 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
3222 		smp_rmb();
3223 
3224 	/*
3225 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
3226 	 * cannot get pinned.
3227 	 */
3228 	return !PageAnonExclusive(page);
3229 }
3230 
3231 /*
3232  * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
3233  * a (NUMA hinting) fault is required.
3234  */
3235 static inline bool gup_can_follow_protnone(unsigned int flags)
3236 {
3237 	/*
3238 	 * FOLL_FORCE has to be able to make progress even if the VMA is
3239 	 * inaccessible. Further, FOLL_FORCE access usually does not represent
3240 	 * application behaviour and we should avoid triggering NUMA hinting
3241 	 * faults.
3242 	 */
3243 	return flags & FOLL_FORCE;
3244 }
3245 
3246 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3247 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3248 			       unsigned long size, pte_fn_t fn, void *data);
3249 extern int apply_to_existing_page_range(struct mm_struct *mm,
3250 				   unsigned long address, unsigned long size,
3251 				   pte_fn_t fn, void *data);
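
/*
 * Illustrative sketch (assumption): a minimal pte_fn_t callback that counts
 * populated entries in a range; count_pte() and the locals are hypothetical.
 * Note that apply_to_page_range() allocates missing page tables, while
 * apply_to_existing_page_range() only visits ones that already exist.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *
 *	apply_to_existing_page_range(mm, start, size, count_pte, &count);
 */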
3252 
3253 extern void __init init_mem_debugging_and_hardening(void);
3254 #ifdef CONFIG_PAGE_POISONING
3255 extern void __kernel_poison_pages(struct page *page, int numpages);
3256 extern void __kernel_unpoison_pages(struct page *page, int numpages);
3257 extern bool _page_poisoning_enabled_early;
3258 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3259 static inline bool page_poisoning_enabled(void)
3260 {
3261 	return _page_poisoning_enabled_early;
3262 }
3263 /*
3264  * For use in fast paths after init_mem_debugging_and_hardening() has run,
3265  * or when a false negative result is not harmful when called too early.
3266  */
3267 static inline bool page_poisoning_enabled_static(void)
3268 {
3269 	return static_branch_unlikely(&_page_poisoning_enabled);
3270 }
3271 static inline void kernel_poison_pages(struct page *page, int numpages)
3272 {
3273 	if (page_poisoning_enabled_static())
3274 		__kernel_poison_pages(page, numpages);
3275 }
3276 static inline void kernel_unpoison_pages(struct page *page, int numpages)
3277 {
3278 	if (page_poisoning_enabled_static())
3279 		__kernel_unpoison_pages(page, numpages);
3280 }
3281 #else
3282 static inline bool page_poisoning_enabled(void) { return false; }
3283 static inline bool page_poisoning_enabled_static(void) { return false; }
3284 static inline void __kernel_poison_pages(struct page *page, int numpages) { }
3285 static inline void kernel_poison_pages(struct page *page, int numpages) { }
3286 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3287 #endif
3288 
3289 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3290 static inline bool want_init_on_alloc(gfp_t flags)
3291 {
3292 	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3293 				&init_on_alloc))
3294 		return true;
3295 	return flags & __GFP_ZERO;
3296 }
3297 
3298 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3299 static inline bool want_init_on_free(void)
3300 {
3301 	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3302 				   &init_on_free);
3303 }
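
/*
 * Illustrative sketch (assumption): how an allocation/free path might consult
 * these toggles; zero_out() is a hypothetical helper.
 *
 *	if (want_init_on_alloc(gfp))
 *		zero_out(page, 1 << order);	// allocation path
 *	...
 *	if (want_init_on_free())
 *		zero_out(page, 1 << order);	// free path
 */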
3304 
3305 extern bool _debug_pagealloc_enabled_early;
3306 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3307 
3308 static inline bool debug_pagealloc_enabled(void)
3309 {
3310 	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3311 		_debug_pagealloc_enabled_early;
3312 }
3313 
3314 /*
3315  * For use in fast paths after init_mem_debugging_and_hardening() has run,
3316  * or when a false negative result is not harmful when called too early.
3317  */
3318 static inline bool debug_pagealloc_enabled_static(void)
3319 {
3320 	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3321 		return false;
3322 
3323 	return static_branch_unlikely(&_debug_pagealloc_enabled);
3324 }
3325 
3326 #ifdef CONFIG_DEBUG_PAGEALLOC
3327 /*
3328  * To support DEBUG_PAGEALLOC, the architecture must ensure that
3329  * __kernel_map_pages() never fails.
3330  */
3331 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3332 
3333 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3334 {
3335 	if (debug_pagealloc_enabled_static())
3336 		__kernel_map_pages(page, numpages, 1);
3337 }
3338 
3339 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3340 {
3341 	if (debug_pagealloc_enabled_static())
3342 		__kernel_map_pages(page, numpages, 0);
3343 }
3344 #else	/* CONFIG_DEBUG_PAGEALLOC */
3345 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3346 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3347 #endif	/* CONFIG_DEBUG_PAGEALLOC */
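
/*
 * Illustrative sketch (assumption): with DEBUG_PAGEALLOC enabled, the page
 * allocator maps pages into the kernel direct map on allocation and unmaps
 * them on free, so stray accesses to freed pages fault immediately.
 *
 *	debug_pagealloc_map_pages(page, 1 << order);	// allocation path
 *	...
 *	debug_pagealloc_unmap_pages(page, 1 << order);	// free path
 */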
3348 
3349 #ifdef __HAVE_ARCH_GATE_AREA
3350 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3351 extern int in_gate_area_no_mm(unsigned long addr);
3352 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3353 #else
3354 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3355 {
3356 	return NULL;
3357 }
3358 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3359 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3360 {
3361 	return 0;
3362 }
3363 #endif	/* __HAVE_ARCH_GATE_AREA */
3364 
3365 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3366 
3367 #ifdef CONFIG_SYSCTL
3368 extern int sysctl_drop_caches;
3369 int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
3370 		loff_t *);
3371 #endif
3372 
3373 void drop_slab(void);
3374 
3375 #ifndef CONFIG_MMU
3376 #define randomize_va_space 0
3377 #else
3378 extern int randomize_va_space;
3379 #endif
3380 
3381 const char * arch_vma_name(struct vm_area_struct *vma);
3382 #ifdef CONFIG_MMU
3383 void print_vma_addr(char *prefix, unsigned long rip);
3384 #else
3385 static inline void print_vma_addr(char *prefix, unsigned long rip)
3386 {
3387 }
3388 #endif
3389 
3390 void *sparse_buffer_alloc(unsigned long size);
3391 struct page * __populate_section_memmap(unsigned long pfn,
3392 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3393 		struct dev_pagemap *pgmap);
3394 void pmd_init(void *addr);
3395 void pud_init(void *addr);
3396 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3397 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3398 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3399 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3400 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3401 			    struct vmem_altmap *altmap, struct page *reuse);
3402 void *vmemmap_alloc_block(unsigned long size, int node);
3403 struct vmem_altmap;
3404 void *vmemmap_alloc_block_buf(unsigned long size, int node,
3405 			      struct vmem_altmap *altmap);
3406 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3407 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
3408 		     unsigned long addr, unsigned long next);
3409 int vmemmap_check_pmd(pmd_t *pmd, int node,
3410 		      unsigned long addr, unsigned long next);
3411 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3412 			       int node, struct vmem_altmap *altmap);
3413 int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
3414 			       int node, struct vmem_altmap *altmap);
3415 int vmemmap_populate(unsigned long start, unsigned long end, int node,
3416 		struct vmem_altmap *altmap);
3417 void vmemmap_populate_print_last(void);
3418 #ifdef CONFIG_MEMORY_HOTPLUG
3419 void vmemmap_free(unsigned long start, unsigned long end,
3420 		struct vmem_altmap *altmap);
3421 #endif
3422 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3423 				  unsigned long nr_pages);
3424 
3425 enum mf_flags {
3426 	MF_COUNT_INCREASED = 1 << 0,
3427 	MF_ACTION_REQUIRED = 1 << 1,
3428 	MF_MUST_KILL = 1 << 2,
3429 	MF_SOFT_OFFLINE = 1 << 3,
3430 	MF_UNPOISON = 1 << 4,
3431 	MF_SW_SIMULATED = 1 << 5,
3432 	MF_NO_RETRY = 1 << 6,
3433 };
3434 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
3435 		      unsigned long count, int mf_flags);
3436 extern int memory_failure(unsigned long pfn, int flags);
3437 extern void memory_failure_queue_kick(int cpu);
3438 extern int unpoison_memory(unsigned long pfn);
3439 extern int sysctl_memory_failure_early_kill;
3440 extern int sysctl_memory_failure_recovery;
3441 extern void shake_page(struct page *p);
3442 extern atomic_long_t num_poisoned_pages __read_mostly;
3443 extern int soft_offline_page(unsigned long pfn, int flags);
3444 #ifdef CONFIG_MEMORY_FAILURE
3445 extern void memory_failure_queue(unsigned long pfn, int flags);
3446 extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3447 					bool *migratable_cleared);
3448 void num_poisoned_pages_inc(unsigned long pfn);
3449 void num_poisoned_pages_sub(unsigned long pfn, long i);
3450 #else
3451 static inline void memory_failure_queue(unsigned long pfn, int flags)
3452 {
3453 }
3454 
3455 static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3456 					bool *migratable_cleared)
3457 {
3458 	return 0;
3459 }
3460 
3461 static inline void num_poisoned_pages_inc(unsigned long pfn)
3462 {
3463 }
3464 
3465 static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
3466 {
3467 }
3468 #endif
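
/*
 * Illustrative sketch (assumption): how an arch machine-check handler might
 * report a corrupted PFN. memory_failure_queue() is usable from interrupt
 * context, while memory_failure() is called from process context.
 *
 *	// from the machine-check interrupt handler:
 *	memory_failure_queue(pfn, MF_ACTION_REQUIRED);
 *
 *	// or later, from process context:
 *	ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_MUST_KILL);
 */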
3469 
3470 #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
3471 extern void memblk_nr_poison_inc(unsigned long pfn);
3472 extern void memblk_nr_poison_sub(unsigned long pfn, long i);
3473 #else
3474 static inline void memblk_nr_poison_inc(unsigned long pfn)
3475 {
3476 }
3477 
3478 static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
3479 {
3480 }
3481 #endif
3482 
3483 #ifndef arch_memory_failure
3484 static inline int arch_memory_failure(unsigned long pfn, int flags)
3485 {
3486 	return -ENXIO;
3487 }
3488 #endif
3489 
3490 #ifndef arch_is_platform_page
3491 static inline bool arch_is_platform_page(u64 paddr)
3492 {
3493 	return false;
3494 }
3495 #endif
3496 
3497 /*
3498  * Error handlers for various types of pages.
3499  */
3500 enum mf_result {
3501 	MF_IGNORED,	/* Error: cannot be handled */
3502 	MF_FAILED,	/* Error: handling failed */
3503 	MF_DELAYED,	/* Will be handled later */
3504 	MF_RECOVERED,	/* Successfully recovered */
3505 };
3506 
3507 enum mf_action_page_type {
3508 	MF_MSG_KERNEL,
3509 	MF_MSG_KERNEL_HIGH_ORDER,
3510 	MF_MSG_SLAB,
3511 	MF_MSG_DIFFERENT_COMPOUND,
3512 	MF_MSG_HUGE,
3513 	MF_MSG_FREE_HUGE,
3514 	MF_MSG_UNMAP_FAILED,
3515 	MF_MSG_DIRTY_SWAPCACHE,
3516 	MF_MSG_CLEAN_SWAPCACHE,
3517 	MF_MSG_DIRTY_MLOCKED_LRU,
3518 	MF_MSG_CLEAN_MLOCKED_LRU,
3519 	MF_MSG_DIRTY_UNEVICTABLE_LRU,
3520 	MF_MSG_CLEAN_UNEVICTABLE_LRU,
3521 	MF_MSG_DIRTY_LRU,
3522 	MF_MSG_CLEAN_LRU,
3523 	MF_MSG_TRUNCATED_LRU,
3524 	MF_MSG_BUDDY,
3525 	MF_MSG_DAX,
3526 	MF_MSG_UNSPLIT_THP,
3527 	MF_MSG_UNKNOWN,
3528 };
3529 
3530 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
3531 extern void clear_huge_page(struct page *page,
3532 			    unsigned long addr_hint,
3533 			    unsigned int pages_per_huge_page);
3534 extern void copy_user_huge_page(struct page *dst, struct page *src,
3535 				unsigned long addr_hint,
3536 				struct vm_area_struct *vma,
3537 				unsigned int pages_per_huge_page);
3538 extern long copy_huge_page_from_user(struct page *dst_page,
3539 				const void __user *usr_src,
3540 				unsigned int pages_per_huge_page,
3541 				bool allow_pagefault);
3542 
3543 /**
3544  * vma_is_special_huge - Are transhuge page-table entries considered special?
3545  * @vma: Pointer to the struct vm_area_struct to consider
3546  *
3547  * Whether transhuge page-table entries are considered "special" following
3548  * the definition in vm_normal_page().
3549  *
3550  * Return: true if transhuge page-table entries should be considered special,
3551  * false otherwise.
3552  */
3553 static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
3554 {
3555 	return vma_is_dax(vma) || (vma->vm_file &&
3556 				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
3557 }
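
/*
 * Illustrative sketch (assumption): zap/split paths can use this to decide
 * whether a huge entry has a backing struct page to operate on.
 *
 *	if (vma_is_special_huge(vma)) {
 *		// PFN-mapped entry: no struct page, just clear the entry
 *	} else {
 *		// normal page: adjust rmap and refcounts as usual
 *	}
 */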
3558 
3559 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
3560 
3561 #ifdef CONFIG_DEBUG_PAGEALLOC
3562 extern unsigned int _debug_guardpage_minorder;
3563 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3564 
3565 static inline unsigned int debug_guardpage_minorder(void)
3566 {
3567 	return _debug_guardpage_minorder;
3568 }
3569 
3570 static inline bool debug_guardpage_enabled(void)
3571 {
3572 	return static_branch_unlikely(&_debug_guardpage_enabled);
3573 }
3574 
3575 static inline bool page_is_guard(struct page *page)
3576 {
3577 	if (!debug_guardpage_enabled())
3578 		return false;
3579 
3580 	return PageGuard(page);
3581 }
3582 #else
3583 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3584 static inline bool debug_guardpage_enabled(void) { return false; }
3585 static inline bool page_is_guard(struct page *page) { return false; }
3586 #endif /* CONFIG_DEBUG_PAGEALLOC */
3587 
3588 #if MAX_NUMNODES > 1
3589 void __init setup_nr_node_ids(void);
3590 #else
3591 static inline void setup_nr_node_ids(void) {}
3592 #endif
3593 
3594 extern int memcmp_pages(struct page *page1, struct page *page2);
3595 
3596 static inline int pages_identical(struct page *page1, struct page *page2)
3597 {
3598 	return !memcmp_pages(page1, page2);
3599 }
3600 
3601 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
3602 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
3603 						pgoff_t first_index, pgoff_t nr,
3604 						pgoff_t bitmap_pgoff,
3605 						unsigned long *bitmap,
3606 						pgoff_t *start,
3607 						pgoff_t *end);
3608 
3609 unsigned long wp_shared_mapping_range(struct address_space *mapping,
3610 				      pgoff_t first_index, pgoff_t nr);
3611 #endif
3612 
3613 extern int sysctl_nr_trim_pages;
3614 
3615 #ifdef CONFIG_PRINTK
3616 void mem_dump_obj(void *object);
3617 #else
3618 static inline void mem_dump_obj(void *object) {}
3619 #endif
3620 
3621 /**
3622  * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
3623  * @seals: the seals to check
3624  * @vma: the vma to operate on
3625  *
3626  * Check whether F_SEAL_FUTURE_WRITE is set; if so, do the proper check/handling
3627  * on the vma flags.  Return 0 if the check passes, or <0 on error.
3628  */
3629 static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
3630 {
3631 	if (seals & F_SEAL_FUTURE_WRITE) {
3632 		/*
3633 		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
3634 		 * the "future write" seal is active.
3635 		 */
3636 		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
3637 			return -EPERM;
3638 
3639 		/*
3640 		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
3641 		 * MAP_SHARED and read-only, take care to not allow mprotect to
3642 		 * revert protections on such mappings. Do this only for shared
3643 		 * mappings. For private mappings, we don't need to mask
3644 		 * VM_MAYWRITE, as we still want them to be COW-writable.
3645 		 */
3646 		if (vma->vm_flags & VM_SHARED)
3647 			vma->vm_flags &= ~(VM_MAYWRITE);
3648 	}
3649 
3650 	return 0;
3651 }
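
/*
 * Illustrative sketch (assumption): an mmap() handler for a sealable,
 * memfd-like file; my_mmap() and my_file_seals() are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		int ret = seal_check_future_write(my_file_seals(file), vma);
 *
 *		if (ret)
 *			return ret;
 *		// ... continue with normal mmap setup ...
 *		return 0;
 *	}
 */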
3652 
3653 #ifdef CONFIG_ANON_VMA_NAME
3654 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
3655 			  unsigned long len_in,
3656 			  struct anon_vma_name *anon_name);
3657 #else
3658 static inline int
3659 madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
3660 		      unsigned long len_in, struct anon_vma_name *anon_name) {
3661 	return 0;
3662 }
3663 #endif
3664 
3665 #endif /* _LINUX_MM_H */
3666