xref: /linux-6.15/include/linux/mm.h (revision 34ec4344)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4 
5 #include <linux/errno.h>
6 #include <linux/mmdebug.h>
7 #include <linux/gfp.h>
8 #include <linux/pgalloc_tag.h>
9 #include <linux/bug.h>
10 #include <linux/list.h>
11 #include <linux/mmzone.h>
12 #include <linux/rbtree.h>
13 #include <linux/atomic.h>
14 #include <linux/debug_locks.h>
15 #include <linux/mm_types.h>
16 #include <linux/mmap_lock.h>
17 #include <linux/range.h>
18 #include <linux/pfn.h>
19 #include <linux/percpu-refcount.h>
20 #include <linux/bit_spinlock.h>
21 #include <linux/shrinker.h>
22 #include <linux/resource.h>
23 #include <linux/page_ext.h>
24 #include <linux/err.h>
25 #include <linux/page-flags.h>
26 #include <linux/page_ref.h>
27 #include <linux/overflow.h>
28 #include <linux/sizes.h>
29 #include <linux/sched.h>
30 #include <linux/pgtable.h>
31 #include <linux/kasan.h>
32 #include <linux/memremap.h>
33 #include <linux/slab.h>
34 
35 struct mempolicy;
36 struct anon_vma;
37 struct anon_vma_chain;
38 struct user_struct;
39 struct pt_regs;
40 struct folio_batch;
41 
42 extern int sysctl_page_lock_unfairness;
43 
44 void mm_core_init(void);
45 void init_mm_internals(void);
46 
47 #ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
48 extern unsigned long max_mapnr;
49 
50 static inline void set_max_mapnr(unsigned long limit)
51 {
52 	max_mapnr = limit;
53 }
54 #else
55 static inline void set_max_mapnr(unsigned long limit) { }
56 #endif
57 
58 extern atomic_long_t _totalram_pages;
59 static inline unsigned long totalram_pages(void)
60 {
61 	return (unsigned long)atomic_long_read(&_totalram_pages);
62 }
63 
64 static inline void totalram_pages_inc(void)
65 {
66 	atomic_long_inc(&_totalram_pages);
67 }
68 
69 static inline void totalram_pages_dec(void)
70 {
71 	atomic_long_dec(&_totalram_pages);
72 }
73 
74 static inline void totalram_pages_add(long count)
75 {
76 	atomic_long_add(count, &_totalram_pages);
77 }
78 
79 extern void * high_memory;
80 extern int page_cluster;
81 extern const int page_cluster_max;
82 
83 #ifdef CONFIG_SYSCTL
84 extern int sysctl_legacy_va_layout;
85 #else
86 #define sysctl_legacy_va_layout 0
87 #endif
88 
89 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
90 extern const int mmap_rnd_bits_min;
91 extern int mmap_rnd_bits_max __ro_after_init;
92 extern int mmap_rnd_bits __read_mostly;
93 #endif
94 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
95 extern const int mmap_rnd_compat_bits_min;
96 extern const int mmap_rnd_compat_bits_max;
97 extern int mmap_rnd_compat_bits __read_mostly;
98 #endif
99 
100 #include <asm/page.h>
101 #include <asm/processor.h>
102 
103 #ifndef __pa_symbol
104 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
105 #endif
106 
107 #ifndef page_to_virt
108 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
109 #endif
110 
111 #ifndef lm_alias
112 #define lm_alias(x)	__va(__pa_symbol(x))
113 #endif
114 
115 /*
116  * To prevent common memory management code establishing
117  * a zero page mapping on a read fault.
118  * This macro should be defined within <asm/pgtable.h>.
119  * s390 does this to prevent multiplexing of hardware bits
120  * related to the physical page in case of virtualization.
121  */
122 #ifndef mm_forbids_zeropage
123 #define mm_forbids_zeropage(X)	(0)
124 #endif
125 
126 /*
127  * On some architectures it is expensive to call memset() for small sizes.
128  * If an architecture decides to implement their own version of
129  * mm_zero_struct_page they should wrap the defines below in a #ifndef and
130  * define their own version of this macro in <asm/pgtable.h>
131  */
132 #if BITS_PER_LONG == 64
133 /* This function must be updated when the size of struct page grows above 96
134  * or reduces below 56. The idea that compiler optimizes out switch()
135  * statement, and only leaves move/store instructions. Also the compiler can
136  * combine write statements if they are both assignments and can be reordered,
137  * this can result in several of the writes here being dropped.
138  */
139 #define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
140 static inline void __mm_zero_struct_page(struct page *page)
141 {
142 	unsigned long *_pp = (void *)page;
143 
144 	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
145 	BUILD_BUG_ON(sizeof(struct page) & 7);
146 	BUILD_BUG_ON(sizeof(struct page) < 56);
147 	BUILD_BUG_ON(sizeof(struct page) > 96);
148 
149 	switch (sizeof(struct page)) {
150 	case 96:
151 		_pp[11] = 0;
152 		fallthrough;
153 	case 88:
154 		_pp[10] = 0;
155 		fallthrough;
156 	case 80:
157 		_pp[9] = 0;
158 		fallthrough;
159 	case 72:
160 		_pp[8] = 0;
161 		fallthrough;
162 	case 64:
163 		_pp[7] = 0;
164 		fallthrough;
165 	case 56:
166 		_pp[6] = 0;
167 		_pp[5] = 0;
168 		_pp[4] = 0;
169 		_pp[3] = 0;
170 		_pp[2] = 0;
171 		_pp[1] = 0;
172 		_pp[0] = 0;
173 	}
174 }
175 #else
176 #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
177 #endif
178 
179 /*
180  * Default maximum number of active map areas, this limits the number of vmas
181  * per mm struct. Users can overwrite this number by sysctl but there is a
182  * problem.
183  *
184  * When a program's coredump is generated as ELF format, a section is created
185  * per a vma. In ELF, the number of sections is represented in unsigned short.
186  * This means the number of sections should be smaller than 65535 at coredump.
187  * Because the kernel adds some informative sections to a image of program at
188  * generating coredump, we need some margin. The number of extra sections is
189  * 1-3 now and depends on arch. We use "5" as safe margin, here.
190  *
191  * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
192  * not a hard limit any more. Although some userspace tools can be surprised by
193  * that.
194  */
195 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
196 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
197 
198 extern int sysctl_max_map_count;
199 
200 extern unsigned long sysctl_user_reserve_kbytes;
201 extern unsigned long sysctl_admin_reserve_kbytes;
202 
203 extern int sysctl_overcommit_memory;
204 extern int sysctl_overcommit_ratio;
205 extern unsigned long sysctl_overcommit_kbytes;
206 
207 int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
208 		loff_t *);
209 int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
210 		loff_t *);
211 int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
212 		loff_t *);
213 
214 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
215 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
216 #define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
217 #else
218 #define nth_page(page,n) ((page) + (n))
219 #define folio_page_idx(folio, p)	((p) - &(folio)->page)
220 #endif
221 
222 /* to align the pointer to the (next) page boundary */
223 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
224 
225 /* to align the pointer to the (prev) page boundary */
226 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
227 
228 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
229 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
230 
231 static inline struct folio *lru_to_folio(struct list_head *head)
232 {
233 	return list_entry((head)->prev, struct folio, lru);
234 }
235 
236 void setup_initial_init_mm(void *start_code, void *end_code,
237 			   void *end_data, void *brk);
238 
239 /*
240  * Linux kernel virtual memory manager primitives.
241  * The idea being to have a "virtual" mm in the same way
242  * we have a virtual fs - giving a cleaner interface to the
243  * mm details, and allowing different kinds of memory mappings
244  * (from shared memory to executable loading to arbitrary
245  * mmap() functions).
246  */
247 
248 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
249 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
250 void vm_area_free(struct vm_area_struct *);
251 /* Use only if VMA has no other users */
252 void __vm_area_free(struct vm_area_struct *vma);
253 
254 #ifndef CONFIG_MMU
255 extern struct rb_root nommu_region_tree;
256 extern struct rw_semaphore nommu_region_sem;
257 
258 extern unsigned int kobjsize(const void *objp);
259 #endif
260 
261 /*
262  * vm_flags in vm_area_struct, see mm_types.h.
263  * When changing, update also include/trace/events/mmflags.h
264  */
265 #define VM_NONE		0x00000000
266 
267 #define VM_READ		0x00000001	/* currently active flags */
268 #define VM_WRITE	0x00000002
269 #define VM_EXEC		0x00000004
270 #define VM_SHARED	0x00000008
271 
272 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
273 #define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
274 #define VM_MAYWRITE	0x00000020
275 #define VM_MAYEXEC	0x00000040
276 #define VM_MAYSHARE	0x00000080
277 
278 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
279 #ifdef CONFIG_MMU
280 #define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
281 #else /* CONFIG_MMU */
282 #define VM_MAYOVERLAY	0x00000200	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
283 #define VM_UFFD_MISSING	0
284 #endif /* CONFIG_MMU */
285 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
286 #define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
287 
288 #define VM_LOCKED	0x00002000
289 #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
290 
291 					/* Used by sys_madvise() */
292 #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
293 #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
294 
295 #define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
296 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
297 #define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
298 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
299 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
300 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
301 #define VM_SYNC		0x00800000	/* Synchronous page faults */
302 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
303 #define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
304 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
305 
306 #ifdef CONFIG_MEM_SOFT_DIRTY
307 # define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
308 #else
309 # define VM_SOFTDIRTY	0
310 #endif
311 
312 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
313 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
314 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
315 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
316 
317 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
318 #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
319 #define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
320 #define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
321 #define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
322 #define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
323 #define VM_HIGH_ARCH_BIT_5	37	/* bit only usable on 64-bit architectures */
324 #define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
325 #define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
326 #define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
327 #define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
328 #define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
329 #define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
330 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
331 
332 #ifdef CONFIG_ARCH_HAS_PKEYS
333 # define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
334 # define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
335 # define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64   */
336 # define VM_PKEY_BIT2	VM_HIGH_ARCH_2
337 # define VM_PKEY_BIT3	VM_HIGH_ARCH_3
338 #ifdef CONFIG_PPC
339 # define VM_PKEY_BIT4  VM_HIGH_ARCH_4
340 #else
341 # define VM_PKEY_BIT4  0
342 #endif
343 #endif /* CONFIG_ARCH_HAS_PKEYS */
344 
345 #ifdef CONFIG_X86_USER_SHADOW_STACK
346 /*
347  * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
348  * support core mm.
349  *
350  * These VMAs will get a single end guard page. This helps userspace protect
351  * itself from attacks. A single page is enough for current shadow stack archs
352  * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
353  * for more details on the guard size.
354  */
355 # define VM_SHADOW_STACK	VM_HIGH_ARCH_5
356 #else
357 # define VM_SHADOW_STACK	VM_NONE
358 #endif
359 
360 #if defined(CONFIG_X86)
361 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
362 #elif defined(CONFIG_PPC)
363 # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
364 #elif defined(CONFIG_PARISC)
365 # define VM_GROWSUP	VM_ARCH_1
366 #elif defined(CONFIG_SPARC64)
367 # define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
368 # define VM_ARCH_CLEAR	VM_SPARC_ADI
369 #elif defined(CONFIG_ARM64)
370 # define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
371 # define VM_ARCH_CLEAR	VM_ARM64_BTI
372 #elif !defined(CONFIG_MMU)
373 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
374 #endif
375 
376 #if defined(CONFIG_ARM64_MTE)
377 # define VM_MTE		VM_HIGH_ARCH_0	/* Use Tagged memory for access control */
378 # define VM_MTE_ALLOWED	VM_HIGH_ARCH_1	/* Tagged memory permitted */
379 #else
380 # define VM_MTE		VM_NONE
381 # define VM_MTE_ALLOWED	VM_NONE
382 #endif
383 
384 #ifndef VM_GROWSUP
385 # define VM_GROWSUP	VM_NONE
386 #endif
387 
388 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
389 # define VM_UFFD_MINOR_BIT	38
390 # define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
391 #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
392 # define VM_UFFD_MINOR		VM_NONE
393 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
394 
395 /*
396  * This flag is used to connect VFIO to arch specific KVM code. It
397  * indicates that the memory under this VMA is safe for use with any
398  * non-cachable memory type inside KVM. Some VFIO devices, on some
399  * platforms, are thought to be unsafe and can cause machine crashes
400  * if KVM does not lock down the memory type.
401  */
402 #ifdef CONFIG_64BIT
403 #define VM_ALLOW_ANY_UNCACHED_BIT	39
404 #define VM_ALLOW_ANY_UNCACHED		BIT(VM_ALLOW_ANY_UNCACHED_BIT)
405 #else
406 #define VM_ALLOW_ANY_UNCACHED		VM_NONE
407 #endif
408 
409 #ifdef CONFIG_64BIT
410 /* VM is sealed, in vm_flags */
411 #define VM_SEALED	_BITUL(63)
412 #endif
413 
414 /* Bits set in the VMA until the stack is in its final location */
415 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
416 
417 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
418 
419 /* Common data flag combinations */
420 #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
421 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
422 #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
423 				 VM_MAYWRITE | VM_MAYEXEC)
424 #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
425 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
426 
427 #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
428 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
429 #endif
430 
431 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
432 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
433 #endif
434 
435 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
436 
437 #ifdef CONFIG_STACK_GROWSUP
438 #define VM_STACK	VM_GROWSUP
439 #define VM_STACK_EARLY	VM_GROWSDOWN
440 #else
441 #define VM_STACK	VM_GROWSDOWN
442 #define VM_STACK_EARLY	0
443 #endif
444 
445 #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
446 
447 /* VMA basic access permission flags */
448 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
449 
450 
451 /*
452  * Special vmas that are non-mergable, non-mlock()able.
453  */
454 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
455 
456 /* This mask prevents VMA from being scanned with khugepaged */
457 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
458 
459 /* This mask defines which mm->def_flags a process can inherit its parent */
460 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
461 
462 /* This mask represents all the VMA flag bits used by mlock */
463 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
464 
465 /* Arch-specific flags to clear when updating VM flags on protection change */
466 #ifndef VM_ARCH_CLEAR
467 # define VM_ARCH_CLEAR	VM_NONE
468 #endif
469 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
470 
471 /*
472  * mapping from the currently active vm_flags protection bits (the
473  * low four bits) to a page protection mask..
474  */
475 
476 /*
477  * The default fault flags that should be used by most of the
478  * arch-specific page fault handlers.
479  */
480 #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
481 			     FAULT_FLAG_KILLABLE | \
482 			     FAULT_FLAG_INTERRUPTIBLE)
483 
484 /**
485  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
486  * @flags: Fault flags.
487  *
488  * This is mostly used for places where we want to try to avoid taking
489  * the mmap_lock for too long a time when waiting for another condition
490  * to change, in which case we can try to be polite to release the
491  * mmap_lock in the first round to avoid potential starvation of other
492  * processes that would also want the mmap_lock.
493  *
494  * Return: true if the page fault allows retry and this is the first
495  * attempt of the fault handling; false otherwise.
496  */
497 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
498 {
499 	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
500 	    (!(flags & FAULT_FLAG_TRIED));
501 }
502 
503 #define FAULT_FLAG_TRACE \
504 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
505 	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
506 	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
507 	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
508 	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
509 	{ FAULT_FLAG_TRIED,		"TRIED" }, \
510 	{ FAULT_FLAG_USER,		"USER" }, \
511 	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
512 	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
513 	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
514 	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
515 
516 /*
517  * vm_fault is filled by the pagefault handler and passed to the vma's
518  * ->fault function. The vma's ->fault is responsible for returning a bitmask
519  * of VM_FAULT_xxx flags that give details about how the fault was handled.
520  *
521  * MM layer fills up gfp_mask for page allocations but fault handler might
522  * alter it if its implementation requires a different allocation context.
523  *
524  * pgoff should be used in favour of virtual_address, if possible.
525  */
526 struct vm_fault {
527 	const struct {
528 		struct vm_area_struct *vma;	/* Target VMA */
529 		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
530 		pgoff_t pgoff;			/* Logical page offset based on vma */
531 		unsigned long address;		/* Faulting virtual address - masked */
532 		unsigned long real_address;	/* Faulting virtual address - unmasked */
533 	};
534 	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
535 					 * XXX: should really be 'const' */
536 	pmd_t *pmd;			/* Pointer to pmd entry matching
537 					 * the 'address' */
538 	pud_t *pud;			/* Pointer to pud entry matching
539 					 * the 'address'
540 					 */
541 	union {
542 		pte_t orig_pte;		/* Value of PTE at the time of fault */
543 		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
544 					 * used by PMD fault only.
545 					 */
546 	};
547 
548 	struct page *cow_page;		/* Page handler may use for COW fault */
549 	struct page *page;		/* ->fault handlers should return a
550 					 * page here, unless VM_FAULT_NOPAGE
551 					 * is set (which is also implied by
552 					 * VM_FAULT_ERROR).
553 					 */
554 	/* These three entries are valid only while holding ptl lock */
555 	pte_t *pte;			/* Pointer to pte entry matching
556 					 * the 'address'. NULL if the page
557 					 * table hasn't been allocated.
558 					 */
559 	spinlock_t *ptl;		/* Page table lock.
560 					 * Protects pte page table if 'pte'
561 					 * is not NULL, otherwise pmd.
562 					 */
563 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
564 					 * vm_ops->map_pages() sets up a page
565 					 * table from atomic context.
566 					 * do_fault_around() pre-allocates
567 					 * page table to avoid allocation from
568 					 * atomic context.
569 					 */
570 };
571 
572 /*
573  * These are the virtual MM functions - opening of an area, closing and
574  * unmapping it (needed to keep files on disk up-to-date etc), pointer
575  * to the functions called when a no-page or a wp-page exception occurs.
576  */
577 struct vm_operations_struct {
578 	void (*open)(struct vm_area_struct * area);
579 	/**
580 	 * @close: Called when the VMA is being removed from the MM.
581 	 * Context: User context.  May sleep.  Caller holds mmap_lock.
582 	 */
583 	void (*close)(struct vm_area_struct * area);
584 	/* Called any time before splitting to check if it's allowed */
585 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
586 	int (*mremap)(struct vm_area_struct *area);
587 	/*
588 	 * Called by mprotect() to make driver-specific permission
589 	 * checks before mprotect() is finalised.   The VMA must not
590 	 * be modified.  Returns 0 if mprotect() can proceed.
591 	 */
592 	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
593 			unsigned long end, unsigned long newflags);
594 	vm_fault_t (*fault)(struct vm_fault *vmf);
595 	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
596 	vm_fault_t (*map_pages)(struct vm_fault *vmf,
597 			pgoff_t start_pgoff, pgoff_t end_pgoff);
598 	unsigned long (*pagesize)(struct vm_area_struct * area);
599 
600 	/* notification that a previously read-only page is about to become
601 	 * writable, if an error is returned it will cause a SIGBUS */
602 	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
603 
604 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
605 	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
606 
607 	/* called by access_process_vm when get_user_pages() fails, typically
608 	 * for use by special VMAs. See also generic_access_phys() for a generic
609 	 * implementation useful for any iomem mapping.
610 	 */
611 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
612 		      void *buf, int len, int write);
613 
614 	/* Called by the /proc/PID/maps code to ask the vma whether it
615 	 * has a special name.  Returning non-NULL will also cause this
616 	 * vma to be dumped unconditionally. */
617 	const char *(*name)(struct vm_area_struct *vma);
618 
619 #ifdef CONFIG_NUMA
620 	/*
621 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
622 	 * to hold the policy upon return.  Caller should pass NULL @new to
623 	 * remove a policy and fall back to surrounding context--i.e. do not
624 	 * install a MPOL_DEFAULT policy, nor the task or system default
625 	 * mempolicy.
626 	 */
627 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
628 
629 	/*
630 	 * get_policy() op must add reference [mpol_get()] to any policy at
631 	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
632 	 * in mm/mempolicy.c will do this automatically.
633 	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
634 	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
635 	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
636 	 * must return NULL--i.e., do not "fallback" to task or system default
637 	 * policy.
638 	 */
639 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
640 					unsigned long addr, pgoff_t *ilx);
641 #endif
642 	/*
643 	 * Called by vm_normal_page() for special PTEs to find the
644 	 * page for @addr.  This is useful if the default behavior
645 	 * (using pte_page()) would not find the correct page.
646 	 */
647 	struct page *(*find_special_page)(struct vm_area_struct *vma,
648 					  unsigned long addr);
649 };
650 
651 #ifdef CONFIG_NUMA_BALANCING
652 static inline void vma_numab_state_init(struct vm_area_struct *vma)
653 {
654 	vma->numab_state = NULL;
655 }
656 static inline void vma_numab_state_free(struct vm_area_struct *vma)
657 {
658 	kfree(vma->numab_state);
659 }
660 #else
661 static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
662 static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
663 #endif /* CONFIG_NUMA_BALANCING */
664 
665 #ifdef CONFIG_PER_VMA_LOCK
666 /*
667  * Try to read-lock a vma. The function is allowed to occasionally yield false
668  * locked result to avoid performance overhead, in which case we fall back to
669  * using mmap_lock. The function should never yield false unlocked result.
670  */
671 static inline bool vma_start_read(struct vm_area_struct *vma)
672 {
673 	/*
674 	 * Check before locking. A race might cause false locked result.
675 	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
676 	 * ACQUIRE semantics, because this is just a lockless check whose result
677 	 * we don't rely on for anything - the mm_lock_seq read against which we
678 	 * need ordering is below.
679 	 */
680 	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
681 		return false;
682 
683 	if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
684 		return false;
685 
686 	/*
687 	 * Overflow might produce false locked result.
688 	 * False unlocked result is impossible because we modify and check
689 	 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
690 	 * modification invalidates all existing locks.
691 	 *
692 	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
693 	 * racing with vma_end_write_all(), we only start reading from the VMA
694 	 * after it has been unlocked.
695 	 * This pairs with RELEASE semantics in vma_end_write_all().
696 	 */
697 	if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
698 		up_read(&vma->vm_lock->lock);
699 		return false;
700 	}
701 	return true;
702 }
703 
704 static inline void vma_end_read(struct vm_area_struct *vma)
705 {
706 	rcu_read_lock(); /* keeps vma alive till the end of up_read */
707 	up_read(&vma->vm_lock->lock);
708 	rcu_read_unlock();
709 }
710 
711 /* WARNING! Can only be used if mmap_lock is expected to be write-locked */
712 static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
713 {
714 	mmap_assert_write_locked(vma->vm_mm);
715 
716 	/*
717 	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
718 	 * mm->mm_lock_seq can't be concurrently modified.
719 	 */
720 	*mm_lock_seq = vma->vm_mm->mm_lock_seq;
721 	return (vma->vm_lock_seq == *mm_lock_seq);
722 }
723 
724 /*
725  * Begin writing to a VMA.
726  * Exclude concurrent readers under the per-VMA lock until the currently
727  * write-locked mmap_lock is dropped or downgraded.
728  */
729 static inline void vma_start_write(struct vm_area_struct *vma)
730 {
731 	int mm_lock_seq;
732 
733 	if (__is_vma_write_locked(vma, &mm_lock_seq))
734 		return;
735 
736 	down_write(&vma->vm_lock->lock);
737 	/*
738 	 * We should use WRITE_ONCE() here because we can have concurrent reads
739 	 * from the early lockless pessimistic check in vma_start_read().
740 	 * We don't really care about the correctness of that early check, but
741 	 * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
742 	 */
743 	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
744 	up_write(&vma->vm_lock->lock);
745 }
746 
747 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
748 {
749 	int mm_lock_seq;
750 
751 	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
752 }
753 
754 static inline void vma_assert_locked(struct vm_area_struct *vma)
755 {
756 	if (!rwsem_is_locked(&vma->vm_lock->lock))
757 		vma_assert_write_locked(vma);
758 }
759 
760 static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
761 {
762 	/* When detaching vma should be write-locked */
763 	if (detached)
764 		vma_assert_write_locked(vma);
765 	vma->detached = detached;
766 }
767 
768 static inline void release_fault_lock(struct vm_fault *vmf)
769 {
770 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
771 		vma_end_read(vmf->vma);
772 	else
773 		mmap_read_unlock(vmf->vma->vm_mm);
774 }
775 
776 static inline void assert_fault_locked(struct vm_fault *vmf)
777 {
778 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
779 		vma_assert_locked(vmf->vma);
780 	else
781 		mmap_assert_locked(vmf->vma->vm_mm);
782 }
783 
784 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
785 					  unsigned long address);
786 
787 #else /* CONFIG_PER_VMA_LOCK */
788 
789 static inline bool vma_start_read(struct vm_area_struct *vma)
790 		{ return false; }
791 static inline void vma_end_read(struct vm_area_struct *vma) {}
792 static inline void vma_start_write(struct vm_area_struct *vma) {}
793 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
794 		{ mmap_assert_write_locked(vma->vm_mm); }
795 static inline void vma_mark_detached(struct vm_area_struct *vma,
796 				     bool detached) {}
797 
798 static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
799 		unsigned long address)
800 {
801 	return NULL;
802 }
803 
804 static inline void vma_assert_locked(struct vm_area_struct *vma)
805 {
806 	mmap_assert_locked(vma->vm_mm);
807 }
808 
809 static inline void release_fault_lock(struct vm_fault *vmf)
810 {
811 	mmap_read_unlock(vmf->vma->vm_mm);
812 }
813 
814 static inline void assert_fault_locked(struct vm_fault *vmf)
815 {
816 	mmap_assert_locked(vmf->vma->vm_mm);
817 }
818 
819 #endif /* CONFIG_PER_VMA_LOCK */
820 
821 extern const struct vm_operations_struct vma_dummy_vm_ops;
822 
823 /*
824  * WARNING: vma_init does not initialize vma->vm_lock.
825  * Use vm_area_alloc()/vm_area_free() if vma needs locking.
826  */
827 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
828 {
829 	memset(vma, 0, sizeof(*vma));
830 	vma->vm_mm = mm;
831 	vma->vm_ops = &vma_dummy_vm_ops;
832 	INIT_LIST_HEAD(&vma->anon_vma_chain);
833 	vma_mark_detached(vma, false);
834 	vma_numab_state_init(vma);
835 }
836 
837 /* Use when VMA is not part of the VMA tree and needs no locking */
838 static inline void vm_flags_init(struct vm_area_struct *vma,
839 				 vm_flags_t flags)
840 {
841 	ACCESS_PRIVATE(vma, __vm_flags) = flags;
842 }
843 
844 /*
845  * Use when VMA is part of the VMA tree and modifications need coordination
846  * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
847  * it should be locked explicitly beforehand.
848  */
849 static inline void vm_flags_reset(struct vm_area_struct *vma,
850 				  vm_flags_t flags)
851 {
852 	vma_assert_write_locked(vma);
853 	vm_flags_init(vma, flags);
854 }
855 
856 static inline void vm_flags_reset_once(struct vm_area_struct *vma,
857 				       vm_flags_t flags)
858 {
859 	vma_assert_write_locked(vma);
860 	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
861 }
862 
863 static inline void vm_flags_set(struct vm_area_struct *vma,
864 				vm_flags_t flags)
865 {
866 	vma_start_write(vma);
867 	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
868 }
869 
870 static inline void vm_flags_clear(struct vm_area_struct *vma,
871 				  vm_flags_t flags)
872 {
873 	vma_start_write(vma);
874 	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
875 }
876 
877 /*
878  * Use only if VMA is not part of the VMA tree or has no other users and
879  * therefore needs no locking.
880  */
881 static inline void __vm_flags_mod(struct vm_area_struct *vma,
882 				  vm_flags_t set, vm_flags_t clear)
883 {
884 	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
885 }
886 
887 /*
888  * Use only when the order of set/clear operations is unimportant, otherwise
889  * use vm_flags_{set|clear} explicitly.
890  */
891 static inline void vm_flags_mod(struct vm_area_struct *vma,
892 				vm_flags_t set, vm_flags_t clear)
893 {
894 	vma_start_write(vma);
895 	__vm_flags_mod(vma, set, clear);
896 }
897 
898 static inline void vma_set_anonymous(struct vm_area_struct *vma)
899 {
900 	vma->vm_ops = NULL;
901 }
902 
903 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
904 {
905 	return !vma->vm_ops;
906 }
907 
908 /*
909  * Indicate if the VMA is a heap for the given task; for
910  * /proc/PID/maps that is the heap of the main task.
911  */
912 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
913 {
914 	return vma->vm_start < vma->vm_mm->brk &&
915 		vma->vm_end > vma->vm_mm->start_brk;
916 }
917 
918 /*
919  * Indicate if the VMA is a stack for the given task; for
920  * /proc/PID/maps that is the stack of the main task.
921  */
922 static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
923 {
924 	/*
925 	 * We make no effort to guess what a given thread considers to be
926 	 * its "stack".  It's not even well-defined for programs written
927 	 * languages like Go.
928 	 */
929 	return vma->vm_start <= vma->vm_mm->start_stack &&
930 		vma->vm_end >= vma->vm_mm->start_stack;
931 }
932 
933 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
934 {
935 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
936 
937 	if (!maybe_stack)
938 		return false;
939 
940 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
941 						VM_STACK_INCOMPLETE_SETUP)
942 		return true;
943 
944 	return false;
945 }
946 
947 static inline bool vma_is_foreign(struct vm_area_struct *vma)
948 {
949 	if (!current->mm)
950 		return true;
951 
952 	if (current->mm != vma->vm_mm)
953 		return true;
954 
955 	return false;
956 }
957 
958 static inline bool vma_is_accessible(struct vm_area_struct *vma)
959 {
960 	return vma->vm_flags & VM_ACCESS_FLAGS;
961 }
962 
963 static inline bool is_shared_maywrite(vm_flags_t vm_flags)
964 {
965 	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
966 		(VM_SHARED | VM_MAYWRITE);
967 }
968 
969 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
970 {
971 	return is_shared_maywrite(vma->vm_flags);
972 }
973 
974 static inline
975 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
976 {
977 	return mas_find(&vmi->mas, max - 1);
978 }
979 
980 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
981 {
982 	/*
983 	 * Uses mas_find() to get the first VMA when the iterator starts.
984 	 * Calling mas_next() could skip the first entry.
985 	 */
986 	return mas_find(&vmi->mas, ULONG_MAX);
987 }
988 
989 static inline
990 struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
991 {
992 	return mas_next_range(&vmi->mas, ULONG_MAX);
993 }
994 
995 
996 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
997 {
998 	return mas_prev(&vmi->mas, 0);
999 }
1000 
1001 static inline
1002 struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
1003 {
1004 	return mas_prev_range(&vmi->mas, 0);
1005 }
1006 
1007 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
1008 {
1009 	return vmi->mas.index;
1010 }
1011 
1012 static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
1013 {
1014 	return vmi->mas.last + 1;
1015 }
1016 static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
1017 				      unsigned long count)
1018 {
1019 	return mas_expected_entries(&vmi->mas, count);
1020 }
1021 
1022 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1023 			unsigned long start, unsigned long end, gfp_t gfp)
1024 {
1025 	__mas_set_range(&vmi->mas, start, end - 1);
1026 	mas_store_gfp(&vmi->mas, NULL, gfp);
1027 	if (unlikely(mas_is_err(&vmi->mas)))
1028 		return -ENOMEM;
1029 
1030 	return 0;
1031 }
1032 
1033 /* Free any unused preallocations */
1034 static inline void vma_iter_free(struct vma_iterator *vmi)
1035 {
1036 	mas_destroy(&vmi->mas);
1037 }
1038 
1039 static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1040 				      struct vm_area_struct *vma)
1041 {
1042 	vmi->mas.index = vma->vm_start;
1043 	vmi->mas.last = vma->vm_end - 1;
1044 	mas_store(&vmi->mas, vma);
1045 	if (unlikely(mas_is_err(&vmi->mas)))
1046 		return -ENOMEM;
1047 
1048 	return 0;
1049 }
1050 
1051 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1052 {
1053 	mas_pause(&vmi->mas);
1054 }
1055 
1056 static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1057 {
1058 	mas_set(&vmi->mas, addr);
1059 }
1060 
1061 #define for_each_vma(__vmi, __vma)					\
1062 	while (((__vma) = vma_next(&(__vmi))) != NULL)
1063 
1064 /* The MM code likes to work with exclusive end addresses */
1065 #define for_each_vma_range(__vmi, __vma, __end)				\
1066 	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
1067 
1068 #ifdef CONFIG_SHMEM
1069 /*
1070  * The vma_is_shmem is not inline because it is used only by slow
1071  * paths in userfault.
1072  */
1073 bool vma_is_shmem(struct vm_area_struct *vma);
1074 bool vma_is_anon_shmem(struct vm_area_struct *vma);
1075 #else
1076 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1077 static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
1078 #endif
1079 
1080 int vma_is_stack_for_current(struct vm_area_struct *vma);
1081 
1082 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1083 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1084 
1085 struct mmu_gather;
1086 struct inode;
1087 
1088 /*
1089  * compound_order() can be called without holding a reference, which means
1090  * that niceties like page_folio() don't work.  These callers should be
1091  * prepared to handle wild return values.  For example, PG_head may be
1092  * set before the order is initialised, or this may be a tail page.
1093  * See compaction.c for some good examples.
1094  */
1095 static inline unsigned int compound_order(struct page *page)
1096 {
1097 	struct folio *folio = (struct folio *)page;
1098 
1099 	if (!test_bit(PG_head, &folio->flags))
1100 		return 0;
1101 	return folio->_flags_1 & 0xff;
1102 }
1103 
1104 /**
1105  * folio_order - The allocation order of a folio.
1106  * @folio: The folio.
1107  *
1108  * A folio is composed of 2^order pages.  See get_order() for the definition
1109  * of order.
1110  *
1111  * Return: The order of the folio.
1112  */
1113 static inline unsigned int folio_order(struct folio *folio)
1114 {
1115 	if (!folio_test_large(folio))
1116 		return 0;
1117 	return folio->_flags_1 & 0xff;
1118 }
1119 
1120 #include <linux/huge_mm.h>
1121 
1122 /*
1123  * Methods to modify the page usage count.
1124  *
1125  * What counts for a page usage:
1126  * - cache mapping   (page->mapping)
1127  * - private data    (page->private)
1128  * - page mapped in a task's page tables, each mapping
1129  *   is counted separately
1130  *
1131  * Also, many kernel routines increase the page count before a critical
1132  * routine so they can be sure the page doesn't go away from under them.
1133  */
1134 
1135 /*
1136  * Drop a ref, return true if the refcount fell to zero (the page has no users)
1137  */
1138 static inline int put_page_testzero(struct page *page)
1139 {
1140 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1141 	return page_ref_dec_and_test(page);
1142 }
1143 
1144 static inline int folio_put_testzero(struct folio *folio)
1145 {
1146 	return put_page_testzero(&folio->page);
1147 }
1148 
1149 /*
1150  * Try to grab a ref unless the page has a refcount of zero, return false if
1151  * that is the case.
1152  * This can be called when MMU is off so it must not access
1153  * any of the virtual mappings.
1154  */
1155 static inline bool get_page_unless_zero(struct page *page)
1156 {
1157 	return page_ref_add_unless(page, 1, 0);
1158 }
1159 
1160 static inline struct folio *folio_get_nontail_page(struct page *page)
1161 {
1162 	if (unlikely(!get_page_unless_zero(page)))
1163 		return NULL;
1164 	return (struct folio *)page;
1165 }
1166 
1167 extern int page_is_ram(unsigned long pfn);
1168 
1169 enum {
1170 	REGION_INTERSECTS,
1171 	REGION_DISJOINT,
1172 	REGION_MIXED,
1173 };
1174 
1175 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1176 		      unsigned long desc);
1177 
1178 /* Support for virtually mapped pages */
1179 struct page *vmalloc_to_page(const void *addr);
1180 unsigned long vmalloc_to_pfn(const void *addr);
1181 
1182 /*
1183  * Determine if an address is within the vmalloc range
1184  *
1185  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
1186  * is no special casing required.
1187  */
1188 #ifdef CONFIG_MMU
1189 extern bool is_vmalloc_addr(const void *x);
1190 extern int is_vmalloc_or_module_addr(const void *x);
1191 #else
1192 static inline bool is_vmalloc_addr(const void *x)
1193 {
1194 	return false;
1195 }
1196 static inline int is_vmalloc_or_module_addr(const void *x)
1197 {
1198 	return 0;
1199 }
1200 #endif
1201 
1202 /*
1203  * How many times the entire folio is mapped as a single unit (eg by a
1204  * PMD or PUD entry).  This is probably not what you want, except for
1205  * debugging purposes or implementation of other core folio_*() primitives.
1206  */
1207 static inline int folio_entire_mapcount(const struct folio *folio)
1208 {
1209 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1210 	return atomic_read(&folio->_entire_mapcount) + 1;
1211 }
1212 
1213 static inline int folio_large_mapcount(const struct folio *folio)
1214 {
1215 	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
1216 	return atomic_read(&folio->_large_mapcount) + 1;
1217 }
1218 
1219 /**
1220  * folio_mapcount() - Number of mappings of this folio.
1221  * @folio: The folio.
1222  *
1223  * The folio mapcount corresponds to the number of present user page table
1224  * entries that reference any part of a folio. Each such present user page
1225  * table entry must be paired with exactly on folio reference.
1226  *
1227  * For ordindary folios, each user page table entry (PTE/PMD/PUD/...) counts
1228  * exactly once.
1229  *
1230  * For hugetlb folios, each abstracted "hugetlb" user page table entry that
1231  * references the entire folio counts exactly once, even when such special
1232  * page table entries are comprised of multiple ordinary page table entries.
1233  *
1234  * Will report 0 for pages which cannot be mapped into userspace, such as
1235  * slab, page tables and similar.
1236  *
1237  * Return: The number of times this folio is mapped.
1238  */
1239 static inline int folio_mapcount(const struct folio *folio)
1240 {
1241 	int mapcount;
1242 
1243 	if (likely(!folio_test_large(folio))) {
1244 		mapcount = atomic_read(&folio->_mapcount) + 1;
1245 		/* Handle page_has_type() pages */
1246 		if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
1247 			mapcount = 0;
1248 		return mapcount;
1249 	}
1250 	return folio_large_mapcount(folio);
1251 }
1252 
1253 /**
1254  * folio_mapped - Is this folio mapped into userspace?
1255  * @folio: The folio.
1256  *
1257  * Return: True if any page in this folio is referenced by user page tables.
1258  */
1259 static inline bool folio_mapped(const struct folio *folio)
1260 {
1261 	return folio_mapcount(folio) >= 1;
1262 }
1263 
1264 /*
1265  * Return true if this page is mapped into pagetables.
1266  * For compound page it returns true if any sub-page of compound page is mapped,
1267  * even if this particular sub-page is not itself mapped by any PTE or PMD.
1268  */
1269 static inline bool page_mapped(const struct page *page)
1270 {
1271 	return folio_mapped(page_folio(page));
1272 }
1273 
1274 static inline struct page *virt_to_head_page(const void *x)
1275 {
1276 	struct page *page = virt_to_page(x);
1277 
1278 	return compound_head(page);
1279 }
1280 
1281 static inline struct folio *virt_to_folio(const void *x)
1282 {
1283 	struct page *page = virt_to_page(x);
1284 
1285 	return page_folio(page);
1286 }
1287 
1288 void __folio_put(struct folio *folio);
1289 
1290 void put_pages_list(struct list_head *pages);
1291 
1292 void split_page(struct page *page, unsigned int order);
1293 void folio_copy(struct folio *dst, struct folio *src);
1294 
1295 unsigned long nr_free_buffer_pages(void);
1296 
1297 /* Returns the number of bytes in this potentially compound page. */
1298 static inline unsigned long page_size(struct page *page)
1299 {
1300 	return PAGE_SIZE << compound_order(page);
1301 }
1302 
1303 /* Returns the number of bits needed for the number of bytes in a page */
1304 static inline unsigned int page_shift(struct page *page)
1305 {
1306 	return PAGE_SHIFT + compound_order(page);
1307 }
1308 
1309 /**
1310  * thp_order - Order of a transparent huge page.
1311  * @page: Head page of a transparent huge page.
1312  */
1313 static inline unsigned int thp_order(struct page *page)
1314 {
1315 	VM_BUG_ON_PGFLAGS(PageTail(page), page);
1316 	return compound_order(page);
1317 }
1318 
1319 /**
1320  * thp_size - Size of a transparent huge page.
1321  * @page: Head page of a transparent huge page.
1322  *
1323  * Return: Number of bytes in this page.
1324  */
1325 static inline unsigned long thp_size(struct page *page)
1326 {
1327 	return PAGE_SIZE << thp_order(page);
1328 }
1329 
1330 #ifdef CONFIG_MMU
1331 /*
1332  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1333  * servicing faults for write access.  In the normal case, do always want
1334  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1335  * that do not have writing enabled, when used by access_process_vm.
1336  */
1337 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1338 {
1339 	if (likely(vma->vm_flags & VM_WRITE))
1340 		pte = pte_mkwrite(pte, vma);
1341 	return pte;
1342 }
1343 
1344 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1345 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1346 		struct page *page, unsigned int nr, unsigned long addr);
1347 
1348 vm_fault_t finish_fault(struct vm_fault *vmf);
1349 #endif
1350 
1351 /*
1352  * Multiple processes may "see" the same page. E.g. for untouched
1353  * mappings of /dev/null, all processes see the same page full of
1354  * zeroes, and text pages of executables and shared libraries have
1355  * only one copy in memory, at most, normally.
1356  *
1357  * For the non-reserved pages, page_count(page) denotes a reference count.
1358  *   page_count() == 0 means the page is free. page->lru is then used for
1359  *   freelist management in the buddy allocator.
1360  *   page_count() > 0  means the page has been allocated.
1361  *
1362  * Pages are allocated by the slab allocator in order to provide memory
1363  * to kmalloc and kmem_cache_alloc. In this case, the management of the
1364  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1365  * unless a particular usage is carefully commented. (the responsibility of
1366  * freeing the kmalloc memory is the caller's, of course).
1367  *
1368  * A page may be used by anyone else who does a __get_free_page().
1369  * In this case, page_count still tracks the references, and should only
1370  * be used through the normal accessor functions. The top bits of page->flags
1371  * and page->virtual store page management information, but all other fields
1372  * are unused and could be used privately, carefully. The management of this
1373  * page is the responsibility of the one who allocated it, and those who have
1374  * subsequently been given references to it.
1375  *
1376  * The other pages (we may call them "pagecache pages") are completely
1377  * managed by the Linux memory manager: I/O, buffers, swapping etc.
1378  * The following discussion applies only to them.
1379  *
1380  * A pagecache page contains an opaque `private' member, which belongs to the
1381  * page's address_space. Usually, this is the address of a circular list of
1382  * the page's disk buffers. PG_private must be set to tell the VM to call
1383  * into the filesystem to release these pages.
1384  *
1385  * A page may belong to an inode's memory mapping. In this case, page->mapping
1386  * is the pointer to the inode, and page->index is the file offset of the page,
1387  * in units of PAGE_SIZE.
1388  *
1389  * If pagecache pages are not associated with an inode, they are said to be
1390  * anonymous pages. These may become associated with the swapcache, and in that
1391  * case PG_swapcache is set, and page->private is an offset into the swapcache.
1392  *
1393  * In either case (swapcache or inode backed), the pagecache itself holds one
1394  * reference to the page. Setting PG_private should also increment the
1395  * refcount. The each user mapping also has a reference to the page.
1396  *
1397  * The pagecache pages are stored in a per-mapping radix tree, which is
1398  * rooted at mapping->i_pages, and indexed by offset.
1399  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
1400  * lists, we instead now tag pages as dirty/writeback in the radix tree.
1401  *
1402  * All pagecache pages may be subject to I/O:
1403  * - inode pages may need to be read from disk,
1404  * - inode pages which have been modified and are MAP_SHARED may need
1405  *   to be written back to the inode on disk,
1406  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
1407  *   modified may need to be swapped out to swap space and (later) to be read
1408  *   back into memory.
1409  */
1410 
1411 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
1412 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1413 
1414 bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
1415 static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1416 {
1417 	if (!static_branch_unlikely(&devmap_managed_key))
1418 		return false;
1419 	if (!folio_is_zone_device(folio))
1420 		return false;
1421 	return __put_devmap_managed_folio_refs(folio, refs);
1422 }
1423 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1424 static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1425 {
1426 	return false;
1427 }
1428 #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1429 
1430 /* 127: arbitrary random number, small enough to assemble well */
1431 #define folio_ref_zero_or_close_to_overflow(folio) \
1432 	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1433 
1434 /**
1435  * folio_get - Increment the reference count on a folio.
1436  * @folio: The folio.
1437  *
1438  * Context: May be called in any context, as long as you know that
1439  * you have a refcount on the folio.  If you do not already have one,
1440  * folio_try_get() may be the right interface for you to use.
1441  */
1442 static inline void folio_get(struct folio *folio)
1443 {
1444 	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1445 	folio_ref_inc(folio);
1446 }
1447 
1448 static inline void get_page(struct page *page)
1449 {
1450 	folio_get(page_folio(page));
1451 }
1452 
1453 static inline __must_check bool try_get_page(struct page *page)
1454 {
1455 	page = compound_head(page);
1456 	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1457 		return false;
1458 	page_ref_inc(page);
1459 	return true;
1460 }
1461 
1462 /**
1463  * folio_put - Decrement the reference count on a folio.
1464  * @folio: The folio.
1465  *
1466  * If the folio's reference count reaches zero, the memory will be
1467  * released back to the page allocator and may be used by another
1468  * allocation immediately.  Do not access the memory or the struct folio
1469  * after calling folio_put() unless you can be sure that it wasn't the
1470  * last reference.
1471  *
1472  * Context: May be called in process or interrupt context, but not in NMI
1473  * context.  May be called while holding a spinlock.
1474  */
1475 static inline void folio_put(struct folio *folio)
1476 {
1477 	if (folio_put_testzero(folio))
1478 		__folio_put(folio);
1479 }
1480 
1481 /**
1482  * folio_put_refs - Reduce the reference count on a folio.
1483  * @folio: The folio.
1484  * @refs: The amount to subtract from the folio's reference count.
1485  *
1486  * If the folio's reference count reaches zero, the memory will be
1487  * released back to the page allocator and may be used by another
1488  * allocation immediately.  Do not access the memory or the struct folio
1489  * after calling folio_put_refs() unless you can be sure that these weren't
1490  * the last references.
1491  *
1492  * Context: May be called in process or interrupt context, but not in NMI
1493  * context.  May be called while holding a spinlock.
1494  */
1495 static inline void folio_put_refs(struct folio *folio, int refs)
1496 {
1497 	if (folio_ref_sub_and_test(folio, refs))
1498 		__folio_put(folio);
1499 }
1500 
1501 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
1502 
1503 /*
1504  * union release_pages_arg - an array of pages or folios
1505  *
1506  * release_pages() releases a simple array of multiple pages, and
1507  * accepts various different forms of said page array: either
1508  * a regular old boring array of pages, an array of folios, or
1509  * an array of encoded page pointers.
1510  *
1511  * The transparent union syntax for this kind of "any of these
1512  * argument types" is all kinds of ugly, so look away.
1513  */
1514 typedef union {
1515 	struct page **pages;
1516 	struct folio **folios;
1517 	struct encoded_page **encoded_pages;
1518 } release_pages_arg __attribute__ ((__transparent_union__));
1519 
1520 void release_pages(release_pages_arg, int nr);
1521 
1522 /**
1523  * folios_put - Decrement the reference count on an array of folios.
1524  * @folios: The folios.
1525  *
1526  * Like folio_put(), but for a batch of folios.  This is more efficient
1527  * than writing the loop yourself as it will optimise the locks which need
1528  * to be taken if the folios are freed.  The folios batch is returned
1529  * empty and ready to be reused for another batch; there is no need to
1530  * reinitialise it.
1531  *
1532  * Context: May be called in process or interrupt context, but not in NMI
1533  * context.  May be called while holding a spinlock.
1534  */
1535 static inline void folios_put(struct folio_batch *folios)
1536 {
1537 	folios_put_refs(folios, NULL);
1538 }
1539 
1540 static inline void put_page(struct page *page)
1541 {
1542 	struct folio *folio = page_folio(page);
1543 
1544 	/*
1545 	 * For some devmap managed pages we need to catch refcount transition
1546 	 * from 2 to 1:
1547 	 */
1548 	if (put_devmap_managed_folio_refs(folio, 1))
1549 		return;
1550 	folio_put(folio);
1551 }
1552 
1553 /*
1554  * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1555  * the page's refcount so that two separate items are tracked: the original page
1556  * reference count, and also a new count of how many pin_user_pages() calls were
1557  * made against the page. ("gup-pinned" is another term for the latter).
1558  *
1559  * With this scheme, pin_user_pages() becomes special: such pages are marked as
1560  * distinct from normal pages. As such, the unpin_user_page() call (and its
1561  * variants) must be used in order to release gup-pinned pages.
1562  *
1563  * Choice of value:
1564  *
1565  * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1566  * counts with respect to pin_user_pages() and unpin_user_page() becomes
1567  * simpler, due to the fact that adding an even power of two to the page
1568  * refcount has the effect of using only the upper N bits, for the code that
1569  * counts up using the bias value. This means that the lower bits are left for
1570  * the exclusive use of the original code that increments and decrements by one
1571  * (or at least, by much smaller values than the bias value).
1572  *
1573  * Of course, once the lower bits overflow into the upper bits (and this is
1574  * OK, because subtraction recovers the original values), then visual inspection
1575  * no longer suffices to directly view the separate counts. However, for normal
1576  * applications that don't have huge page reference counts, this won't be an
1577  * issue.
1578  *
1579  * Locking: the lockless algorithm described in folio_try_get_rcu()
1580  * provides safe operation for get_user_pages(), folio_mkclean() and
1581  * other calls that race to set up page table entries.
1582  */
1583 #define GUP_PIN_COUNTING_BIAS (1U << 10)
1584 
1585 void unpin_user_page(struct page *page);
1586 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1587 				 bool make_dirty);
1588 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1589 				      bool make_dirty);
1590 void unpin_user_pages(struct page **pages, unsigned long npages);
1591 
1592 static inline bool is_cow_mapping(vm_flags_t flags)
1593 {
1594 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1595 }
1596 
1597 #ifndef CONFIG_MMU
1598 static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1599 {
1600 	/*
1601 	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
1602 	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
1603 	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
1604 	 * underlying memory if ptrace is active, so this is only possible if
1605 	 * ptrace does not apply. Note that there is no mprotect() to upgrade
1606 	 * write permissions later.
1607 	 */
1608 	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1609 }
1610 #endif
1611 
1612 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1613 #define SECTION_IN_PAGE_FLAGS
1614 #endif
1615 
1616 /*
1617  * The identification function is mainly used by the buddy allocator for
1618  * determining if two pages could be buddies. We are not really identifying
1619  * the zone since we could be using the section number id if we do not have
1620  * node id available in page flags.
1621  * We only guarantee that it will return the same value for two combinable
1622  * pages in a zone.
1623  */
1624 static inline int page_zone_id(struct page *page)
1625 {
1626 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1627 }
1628 
1629 #ifdef NODE_NOT_IN_PAGE_FLAGS
1630 int page_to_nid(const struct page *page);
1631 #else
1632 static inline int page_to_nid(const struct page *page)
1633 {
1634 	return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
1635 }
1636 #endif
1637 
1638 static inline int folio_nid(const struct folio *folio)
1639 {
1640 	return page_to_nid(&folio->page);
1641 }
1642 
1643 #ifdef CONFIG_NUMA_BALANCING
1644 /* page access time bits needs to hold at least 4 seconds */
1645 #define PAGE_ACCESS_TIME_MIN_BITS	12
1646 #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1647 #define PAGE_ACCESS_TIME_BUCKETS				\
1648 	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1649 #else
1650 #define PAGE_ACCESS_TIME_BUCKETS	0
1651 #endif
1652 
1653 #define PAGE_ACCESS_TIME_MASK				\
1654 	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1655 
1656 static inline int cpu_pid_to_cpupid(int cpu, int pid)
1657 {
1658 	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1659 }
1660 
1661 static inline int cpupid_to_pid(int cpupid)
1662 {
1663 	return cpupid & LAST__PID_MASK;
1664 }
1665 
1666 static inline int cpupid_to_cpu(int cpupid)
1667 {
1668 	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1669 }
1670 
1671 static inline int cpupid_to_nid(int cpupid)
1672 {
1673 	return cpu_to_node(cpupid_to_cpu(cpupid));
1674 }
1675 
1676 static inline bool cpupid_pid_unset(int cpupid)
1677 {
1678 	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1679 }
1680 
1681 static inline bool cpupid_cpu_unset(int cpupid)
1682 {
1683 	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1684 }
1685 
1686 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1687 {
1688 	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1689 }
1690 
1691 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1692 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1693 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1694 {
1695 	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1696 }
1697 
1698 static inline int folio_last_cpupid(struct folio *folio)
1699 {
1700 	return folio->_last_cpupid;
1701 }
1702 static inline void page_cpupid_reset_last(struct page *page)
1703 {
1704 	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1705 }
1706 #else
1707 static inline int folio_last_cpupid(struct folio *folio)
1708 {
1709 	return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1710 }
1711 
1712 int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1713 
1714 static inline void page_cpupid_reset_last(struct page *page)
1715 {
1716 	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1717 }
1718 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1719 
1720 static inline int folio_xchg_access_time(struct folio *folio, int time)
1721 {
1722 	int last_time;
1723 
1724 	last_time = folio_xchg_last_cpupid(folio,
1725 					   time >> PAGE_ACCESS_TIME_BUCKETS);
1726 	return last_time << PAGE_ACCESS_TIME_BUCKETS;
1727 }
1728 
1729 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1730 {
1731 	unsigned int pid_bit;
1732 
1733 	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1734 	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1735 		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1736 	}
1737 }
1738 #else /* !CONFIG_NUMA_BALANCING */
1739 static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1740 {
1741 	return folio_nid(folio); /* XXX */
1742 }
1743 
1744 static inline int folio_xchg_access_time(struct folio *folio, int time)
1745 {
1746 	return 0;
1747 }
1748 
1749 static inline int folio_last_cpupid(struct folio *folio)
1750 {
1751 	return folio_nid(folio); /* XXX */
1752 }
1753 
1754 static inline int cpupid_to_nid(int cpupid)
1755 {
1756 	return -1;
1757 }
1758 
1759 static inline int cpupid_to_pid(int cpupid)
1760 {
1761 	return -1;
1762 }
1763 
1764 static inline int cpupid_to_cpu(int cpupid)
1765 {
1766 	return -1;
1767 }
1768 
1769 static inline int cpu_pid_to_cpupid(int nid, int pid)
1770 {
1771 	return -1;
1772 }
1773 
1774 static inline bool cpupid_pid_unset(int cpupid)
1775 {
1776 	return true;
1777 }
1778 
1779 static inline void page_cpupid_reset_last(struct page *page)
1780 {
1781 }
1782 
1783 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1784 {
1785 	return false;
1786 }
1787 
1788 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1789 {
1790 }
1791 #endif /* CONFIG_NUMA_BALANCING */
1792 
1793 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1794 
1795 /*
1796  * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
1797  * setting tags for all pages to native kernel tag value 0xff, as the default
1798  * value 0x00 maps to 0xff.
1799  */
1800 
1801 static inline u8 page_kasan_tag(const struct page *page)
1802 {
1803 	u8 tag = KASAN_TAG_KERNEL;
1804 
1805 	if (kasan_enabled()) {
1806 		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1807 		tag ^= 0xff;
1808 	}
1809 
1810 	return tag;
1811 }
1812 
1813 static inline void page_kasan_tag_set(struct page *page, u8 tag)
1814 {
1815 	unsigned long old_flags, flags;
1816 
1817 	if (!kasan_enabled())
1818 		return;
1819 
1820 	tag ^= 0xff;
1821 	old_flags = READ_ONCE(page->flags);
1822 	do {
1823 		flags = old_flags;
1824 		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1825 		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1826 	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1827 }
1828 
1829 static inline void page_kasan_tag_reset(struct page *page)
1830 {
1831 	if (kasan_enabled())
1832 		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
1833 }
1834 
1835 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1836 
1837 static inline u8 page_kasan_tag(const struct page *page)
1838 {
1839 	return 0xff;
1840 }
1841 
1842 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1843 static inline void page_kasan_tag_reset(struct page *page) { }
1844 
1845 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1846 
1847 static inline struct zone *page_zone(const struct page *page)
1848 {
1849 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1850 }
1851 
1852 static inline pg_data_t *page_pgdat(const struct page *page)
1853 {
1854 	return NODE_DATA(page_to_nid(page));
1855 }
1856 
1857 static inline struct zone *folio_zone(const struct folio *folio)
1858 {
1859 	return page_zone(&folio->page);
1860 }
1861 
1862 static inline pg_data_t *folio_pgdat(const struct folio *folio)
1863 {
1864 	return page_pgdat(&folio->page);
1865 }
1866 
1867 #ifdef SECTION_IN_PAGE_FLAGS
1868 static inline void set_page_section(struct page *page, unsigned long section)
1869 {
1870 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1871 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1872 }
1873 
1874 static inline unsigned long page_to_section(const struct page *page)
1875 {
1876 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1877 }
1878 #endif
1879 
1880 /**
1881  * folio_pfn - Return the Page Frame Number of a folio.
1882  * @folio: The folio.
1883  *
1884  * A folio may contain multiple pages.  The pages have consecutive
1885  * Page Frame Numbers.
1886  *
1887  * Return: The Page Frame Number of the first page in the folio.
1888  */
1889 static inline unsigned long folio_pfn(struct folio *folio)
1890 {
1891 	return page_to_pfn(&folio->page);
1892 }
1893 
1894 static inline struct folio *pfn_folio(unsigned long pfn)
1895 {
1896 	return page_folio(pfn_to_page(pfn));
1897 }
1898 
1899 /**
1900  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
1901  * @folio: The folio.
1902  *
1903  * This function checks if a folio has been pinned via a call to
1904  * a function in the pin_user_pages() family.
1905  *
1906  * For small folios, the return value is partially fuzzy: false is not fuzzy,
1907  * because it means "definitely not pinned for DMA", but true means "probably
1908  * pinned for DMA, but possibly a false positive due to having at least
1909  * GUP_PIN_COUNTING_BIAS worth of normal folio references".
1910  *
1911  * False positives are OK, because: a) it's unlikely for a folio to
1912  * get that many refcounts, and b) all the callers of this routine are
1913  * expected to be able to deal gracefully with a false positive.
1914  *
1915  * For large folios, the result will be exactly correct. That's because
1916  * we have more tracking data available: the _pincount field is used
1917  * instead of the GUP_PIN_COUNTING_BIAS scheme.
1918  *
1919  * For more information, please see Documentation/core-api/pin_user_pages.rst.
1920  *
1921  * Return: True, if it is likely that the folio has been "dma-pinned".
1922  * False, if the folio is definitely not dma-pinned.
1923  */
1924 static inline bool folio_maybe_dma_pinned(struct folio *folio)
1925 {
1926 	if (folio_test_large(folio))
1927 		return atomic_read(&folio->_pincount) > 0;
1928 
1929 	/*
1930 	 * folio_ref_count() is signed. If that refcount overflows, then
1931 	 * folio_ref_count() returns a negative value, and callers will avoid
1932 	 * further incrementing the refcount.
1933 	 *
1934 	 * Here, for that overflow case, use the sign bit to count a little
1935 	 * bit higher via unsigned math, and thus still get an accurate result.
1936 	 */
1937 	return ((unsigned int)folio_ref_count(folio)) >=
1938 		GUP_PIN_COUNTING_BIAS;
1939 }
1940 
1941 /*
1942  * This should most likely only be called during fork() to see whether we
1943  * should break the cow immediately for an anon page on the src mm.
1944  *
1945  * The caller has to hold the PT lock and the vma->vm_mm->->write_protect_seq.
1946  */
1947 static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
1948 					  struct folio *folio)
1949 {
1950 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
1951 
1952 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1953 		return false;
1954 
1955 	return folio_maybe_dma_pinned(folio);
1956 }
1957 
1958 /**
1959  * is_zero_page - Query if a page is a zero page
1960  * @page: The page to query
1961  *
1962  * This returns true if @page is one of the permanent zero pages.
1963  */
1964 static inline bool is_zero_page(const struct page *page)
1965 {
1966 	return is_zero_pfn(page_to_pfn(page));
1967 }
1968 
1969 /**
1970  * is_zero_folio - Query if a folio is a zero page
1971  * @folio: The folio to query
1972  *
1973  * This returns true if @folio is one of the permanent zero pages.
1974  */
1975 static inline bool is_zero_folio(const struct folio *folio)
1976 {
1977 	return is_zero_page(&folio->page);
1978 }
1979 
1980 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
1981 #ifdef CONFIG_MIGRATION
1982 static inline bool folio_is_longterm_pinnable(struct folio *folio)
1983 {
1984 #ifdef CONFIG_CMA
1985 	int mt = folio_migratetype(folio);
1986 
1987 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
1988 		return false;
1989 #endif
1990 	/* The zero page can be "pinned" but gets special handling. */
1991 	if (is_zero_folio(folio))
1992 		return true;
1993 
1994 	/* Coherent device memory must always allow eviction. */
1995 	if (folio_is_device_coherent(folio))
1996 		return false;
1997 
1998 	/* Otherwise, non-movable zone folios can be pinned. */
1999 	return !folio_is_zone_movable(folio);
2000 
2001 }
2002 #else
2003 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2004 {
2005 	return true;
2006 }
2007 #endif
2008 
2009 static inline void set_page_zone(struct page *page, enum zone_type zone)
2010 {
2011 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
2012 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2013 }
2014 
2015 static inline void set_page_node(struct page *page, unsigned long node)
2016 {
2017 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
2018 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
2019 }
2020 
2021 static inline void set_page_links(struct page *page, enum zone_type zone,
2022 	unsigned long node, unsigned long pfn)
2023 {
2024 	set_page_zone(page, zone);
2025 	set_page_node(page, node);
2026 #ifdef SECTION_IN_PAGE_FLAGS
2027 	set_page_section(page, pfn_to_section_nr(pfn));
2028 #endif
2029 }
2030 
2031 /**
2032  * folio_nr_pages - The number of pages in the folio.
2033  * @folio: The folio.
2034  *
2035  * Return: A positive power of two.
2036  */
2037 static inline long folio_nr_pages(const struct folio *folio)
2038 {
2039 	if (!folio_test_large(folio))
2040 		return 1;
2041 #ifdef CONFIG_64BIT
2042 	return folio->_folio_nr_pages;
2043 #else
2044 	return 1L << (folio->_flags_1 & 0xff);
2045 #endif
2046 }
2047 
2048 /* Only hugetlbfs can allocate folios larger than MAX_ORDER */
2049 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
2050 #define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
2051 #else
2052 #define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
2053 #endif
2054 
2055 /*
2056  * compound_nr() returns the number of pages in this potentially compound
2057  * page.  compound_nr() can be called on a tail page, and is defined to
2058  * return 1 in that case.
2059  */
2060 static inline unsigned long compound_nr(struct page *page)
2061 {
2062 	struct folio *folio = (struct folio *)page;
2063 
2064 	if (!test_bit(PG_head, &folio->flags))
2065 		return 1;
2066 #ifdef CONFIG_64BIT
2067 	return folio->_folio_nr_pages;
2068 #else
2069 	return 1L << (folio->_flags_1 & 0xff);
2070 #endif
2071 }
2072 
2073 /**
2074  * thp_nr_pages - The number of regular pages in this huge page.
2075  * @page: The head page of a huge page.
2076  */
2077 static inline int thp_nr_pages(struct page *page)
2078 {
2079 	return folio_nr_pages((struct folio *)page);
2080 }
2081 
2082 /**
2083  * folio_next - Move to the next physical folio.
2084  * @folio: The folio we're currently operating on.
2085  *
2086  * If you have physically contiguous memory which may span more than
2087  * one folio (eg a &struct bio_vec), use this function to move from one
2088  * folio to the next.  Do not use it if the memory is only virtually
2089  * contiguous as the folios are almost certainly not adjacent to each
2090  * other.  This is the folio equivalent to writing ``page++``.
2091  *
2092  * Context: We assume that the folios are refcounted and/or locked at a
2093  * higher level and do not adjust the reference counts.
2094  * Return: The next struct folio.
2095  */
2096 static inline struct folio *folio_next(struct folio *folio)
2097 {
2098 	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2099 }
2100 
2101 /**
2102  * folio_shift - The size of the memory described by this folio.
2103  * @folio: The folio.
2104  *
2105  * A folio represents a number of bytes which is a power-of-two in size.
2106  * This function tells you which power-of-two the folio is.  See also
2107  * folio_size() and folio_order().
2108  *
2109  * Context: The caller should have a reference on the folio to prevent
2110  * it from being split.  It is not necessary for the folio to be locked.
2111  * Return: The base-2 logarithm of the size of this folio.
2112  */
2113 static inline unsigned int folio_shift(struct folio *folio)
2114 {
2115 	return PAGE_SHIFT + folio_order(folio);
2116 }
2117 
2118 /**
2119  * folio_size - The number of bytes in a folio.
2120  * @folio: The folio.
2121  *
2122  * Context: The caller should have a reference on the folio to prevent
2123  * it from being split.  It is not necessary for the folio to be locked.
2124  * Return: The number of bytes in this folio.
2125  */
2126 static inline size_t folio_size(struct folio *folio)
2127 {
2128 	return PAGE_SIZE << folio_order(folio);
2129 }
2130 
2131 /**
2132  * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
2133  *				tables of more than one MM
2134  * @folio: The folio.
2135  *
2136  * This function checks if the folio is currently mapped into more than one
2137  * MM ("mapped shared"), or if the folio is only mapped into a single MM
2138  * ("mapped exclusively").
2139  *
2140  * As precise information is not easily available for all folios, this function
2141  * estimates the number of MMs ("sharers") that are currently mapping a folio
2142  * using the number of times the first page of the folio is currently mapped
2143  * into page tables.
2144  *
2145  * For small anonymous folios (except KSM folios) and anonymous hugetlb folios,
2146  * the return value will be exactly correct, because they can only be mapped
2147  * at most once into an MM, and they cannot be partially mapped.
2148  *
2149  * For other folios, the result can be fuzzy:
2150  *    #. For partially-mappable large folios (THP), the return value can wrongly
2151  *       indicate "mapped exclusively" (false negative) when the folio is
2152  *       only partially mapped into at least one MM.
2153  *    #. For pagecache folios (including hugetlb), the return value can wrongly
2154  *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2155  *       cover the same file range.
2156  *    #. For (small) KSM folios, the return value can wrongly indicate "mapped
2157  *       shared" (false positive), when the folio is mapped multiple times into
2158  *       the same MM.
2159  *
2160  * Further, this function only considers current page table mappings that
2161  * are tracked using the folio mapcount(s).
2162  *
2163  * This function does not consider:
2164  *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2165  *       pagecache, temporary unmapping for migration).
2166  *    #. If the folio is mapped differently (VM_PFNMAP).
2167  *    #. If hugetlb page table sharing applies. Callers might want to check
2168  *       hugetlb_pmd_shared().
2169  *
2170  * Return: Whether the folio is estimated to be mapped into more than one MM.
2171  */
2172 static inline bool folio_likely_mapped_shared(struct folio *folio)
2173 {
2174 	int mapcount = folio_mapcount(folio);
2175 
2176 	/* Only partially-mappable folios require more care. */
2177 	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2178 		return mapcount > 1;
2179 
2180 	/* A single mapping implies "mapped exclusively". */
2181 	if (mapcount <= 1)
2182 		return false;
2183 
2184 	/* If any page is mapped more than once we treat it "mapped shared". */
2185 	if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio))
2186 		return true;
2187 
2188 	/* Let's guess based on the first subpage. */
2189 	return atomic_read(&folio->_mapcount) > 0;
2190 }
2191 
2192 #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
2193 static inline int arch_make_page_accessible(struct page *page)
2194 {
2195 	return 0;
2196 }
2197 #endif
2198 
2199 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
2200 static inline int arch_make_folio_accessible(struct folio *folio)
2201 {
2202 	int ret;
2203 	long i, nr = folio_nr_pages(folio);
2204 
2205 	for (i = 0; i < nr; i++) {
2206 		ret = arch_make_page_accessible(folio_page(folio, i));
2207 		if (ret)
2208 			break;
2209 	}
2210 
2211 	return ret;
2212 }
2213 #endif
2214 
2215 /*
2216  * Some inline functions in vmstat.h depend on page_zone()
2217  */
2218 #include <linux/vmstat.h>
2219 
2220 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2221 #define HASHED_PAGE_VIRTUAL
2222 #endif
2223 
2224 #if defined(WANT_PAGE_VIRTUAL)
2225 static inline void *page_address(const struct page *page)
2226 {
2227 	return page->virtual;
2228 }
2229 static inline void set_page_address(struct page *page, void *address)
2230 {
2231 	page->virtual = address;
2232 }
2233 #define page_address_init()  do { } while(0)
2234 #endif
2235 
2236 #if defined(HASHED_PAGE_VIRTUAL)
2237 void *page_address(const struct page *page);
2238 void set_page_address(struct page *page, void *virtual);
2239 void page_address_init(void);
2240 #endif
2241 
2242 static __always_inline void *lowmem_page_address(const struct page *page)
2243 {
2244 	return page_to_virt(page);
2245 }
2246 
2247 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2248 #define page_address(page) lowmem_page_address(page)
2249 #define set_page_address(page, address)  do { } while(0)
2250 #define page_address_init()  do { } while(0)
2251 #endif
2252 
2253 static inline void *folio_address(const struct folio *folio)
2254 {
2255 	return page_address(&folio->page);
2256 }
2257 
2258 /*
2259  * Return true only if the page has been allocated with
2260  * ALLOC_NO_WATERMARKS and the low watermark was not
2261  * met implying that the system is under some pressure.
2262  */
2263 static inline bool page_is_pfmemalloc(const struct page *page)
2264 {
2265 	/*
2266 	 * lru.next has bit 1 set if the page is allocated from the
2267 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2268 	 * they do not need to preserve that information.
2269 	 */
2270 	return (uintptr_t)page->lru.next & BIT(1);
2271 }
2272 
2273 /*
2274  * Return true only if the folio has been allocated with
2275  * ALLOC_NO_WATERMARKS and the low watermark was not
2276  * met implying that the system is under some pressure.
2277  */
2278 static inline bool folio_is_pfmemalloc(const struct folio *folio)
2279 {
2280 	/*
2281 	 * lru.next has bit 1 set if the page is allocated from the
2282 	 * pfmemalloc reserves.  Callers may simply overwrite it if
2283 	 * they do not need to preserve that information.
2284 	 */
2285 	return (uintptr_t)folio->lru.next & BIT(1);
2286 }
2287 
2288 /*
2289  * Only to be called by the page allocator on a freshly allocated
2290  * page.
2291  */
2292 static inline void set_page_pfmemalloc(struct page *page)
2293 {
2294 	page->lru.next = (void *)BIT(1);
2295 }
2296 
2297 static inline void clear_page_pfmemalloc(struct page *page)
2298 {
2299 	page->lru.next = NULL;
2300 }
2301 
2302 /*
2303  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
2304  */
2305 extern void pagefault_out_of_memory(void);
2306 
2307 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
2308 #define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
2309 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
2310 
2311 /*
2312  * Parameter block passed down to zap_pte_range in exceptional cases.
2313  */
2314 struct zap_details {
2315 	struct folio *single_folio;	/* Locked folio to be unmapped */
2316 	bool even_cows;			/* Zap COWed private pages too? */
2317 	zap_flags_t zap_flags;		/* Extra flags for zapping */
2318 };
2319 
2320 /*
2321  * Whether to drop the pte markers, for example, the uffd-wp information for
2322  * file-backed memory.  This should only be specified when we will completely
2323  * drop the page in the mm, either by truncation or unmapping of the vma.  By
2324  * default, the flag is not set.
2325  */
2326 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
2327 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
2328 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
2329 
2330 #ifdef CONFIG_SCHED_MM_CID
2331 void sched_mm_cid_before_execve(struct task_struct *t);
2332 void sched_mm_cid_after_execve(struct task_struct *t);
2333 void sched_mm_cid_fork(struct task_struct *t);
2334 void sched_mm_cid_exit_signals(struct task_struct *t);
2335 static inline int task_mm_cid(struct task_struct *t)
2336 {
2337 	return t->mm_cid;
2338 }
2339 #else
2340 static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
2341 static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
2342 static inline void sched_mm_cid_fork(struct task_struct *t) { }
2343 static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
2344 static inline int task_mm_cid(struct task_struct *t)
2345 {
2346 	/*
2347 	 * Use the processor id as a fall-back when the mm cid feature is
2348 	 * disabled. This provides functional per-cpu data structure accesses
2349 	 * in user-space, althrough it won't provide the memory usage benefits.
2350 	 */
2351 	return raw_smp_processor_id();
2352 }
2353 #endif
2354 
2355 #ifdef CONFIG_MMU
2356 extern bool can_do_mlock(void);
2357 #else
2358 static inline bool can_do_mlock(void) { return false; }
2359 #endif
2360 extern int user_shm_lock(size_t, struct ucounts *);
2361 extern void user_shm_unlock(size_t, struct ucounts *);
2362 
2363 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2364 			     pte_t pte);
2365 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2366 			     pte_t pte);
2367 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2368 				  unsigned long addr, pmd_t pmd);
2369 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2370 				pmd_t pmd);
2371 
2372 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2373 		  unsigned long size);
2374 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2375 			   unsigned long size, struct zap_details *details);
2376 static inline void zap_vma_pages(struct vm_area_struct *vma)
2377 {
2378 	zap_page_range_single(vma, vma->vm_start,
2379 			      vma->vm_end - vma->vm_start, NULL);
2380 }
2381 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2382 		struct vm_area_struct *start_vma, unsigned long start,
2383 		unsigned long end, unsigned long tree_end, bool mm_wr_locked);
2384 
2385 struct mmu_notifier_range;
2386 
2387 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2388 		unsigned long end, unsigned long floor, unsigned long ceiling);
2389 int
2390 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2391 int follow_pte(struct vm_area_struct *vma, unsigned long address,
2392 	       pte_t **ptepp, spinlock_t **ptlp);
2393 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2394 			void *buf, int len, int write);
2395 
2396 extern void truncate_pagecache(struct inode *inode, loff_t new);
2397 extern void truncate_setsize(struct inode *inode, loff_t newsize);
2398 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2399 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2400 int generic_error_remove_folio(struct address_space *mapping,
2401 		struct folio *folio);
2402 
2403 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2404 		unsigned long address, struct pt_regs *regs);
2405 
2406 #ifdef CONFIG_MMU
2407 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2408 				  unsigned long address, unsigned int flags,
2409 				  struct pt_regs *regs);
2410 extern int fixup_user_fault(struct mm_struct *mm,
2411 			    unsigned long address, unsigned int fault_flags,
2412 			    bool *unlocked);
2413 void unmap_mapping_pages(struct address_space *mapping,
2414 		pgoff_t start, pgoff_t nr, bool even_cows);
2415 void unmap_mapping_range(struct address_space *mapping,
2416 		loff_t const holebegin, loff_t const holelen, int even_cows);
2417 #else
2418 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2419 					 unsigned long address, unsigned int flags,
2420 					 struct pt_regs *regs)
2421 {
2422 	/* should never happen if there's no MMU */
2423 	BUG();
2424 	return VM_FAULT_SIGBUS;
2425 }
2426 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2427 		unsigned int fault_flags, bool *unlocked)
2428 {
2429 	/* should never happen if there's no MMU */
2430 	BUG();
2431 	return -EFAULT;
2432 }
2433 static inline void unmap_mapping_pages(struct address_space *mapping,
2434 		pgoff_t start, pgoff_t nr, bool even_cows) { }
2435 static inline void unmap_mapping_range(struct address_space *mapping,
2436 		loff_t const holebegin, loff_t const holelen, int even_cows) { }
2437 #endif
2438 
2439 static inline void unmap_shared_mapping_range(struct address_space *mapping,
2440 		loff_t const holebegin, loff_t const holelen)
2441 {
2442 	unmap_mapping_range(mapping, holebegin, holelen, 0);
2443 }
2444 
2445 static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2446 						unsigned long addr);
2447 
2448 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2449 		void *buf, int len, unsigned int gup_flags);
2450 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2451 		void *buf, int len, unsigned int gup_flags);
2452 
2453 long get_user_pages_remote(struct mm_struct *mm,
2454 			   unsigned long start, unsigned long nr_pages,
2455 			   unsigned int gup_flags, struct page **pages,
2456 			   int *locked);
2457 long pin_user_pages_remote(struct mm_struct *mm,
2458 			   unsigned long start, unsigned long nr_pages,
2459 			   unsigned int gup_flags, struct page **pages,
2460 			   int *locked);
2461 
2462 /*
2463  * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
2464  */
2465 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2466 						    unsigned long addr,
2467 						    int gup_flags,
2468 						    struct vm_area_struct **vmap)
2469 {
2470 	struct page *page;
2471 	struct vm_area_struct *vma;
2472 	int got;
2473 
2474 	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2475 		return ERR_PTR(-EINVAL);
2476 
2477 	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2478 
2479 	if (got < 0)
2480 		return ERR_PTR(got);
2481 
2482 	vma = vma_lookup(mm, addr);
2483 	if (WARN_ON_ONCE(!vma)) {
2484 		put_page(page);
2485 		return ERR_PTR(-EINVAL);
2486 	}
2487 
2488 	*vmap = vma;
2489 	return page;
2490 }
2491 
2492 long get_user_pages(unsigned long start, unsigned long nr_pages,
2493 		    unsigned int gup_flags, struct page **pages);
2494 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2495 		    unsigned int gup_flags, struct page **pages);
2496 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2497 		    struct page **pages, unsigned int gup_flags);
2498 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2499 		    struct page **pages, unsigned int gup_flags);
2500 
2501 int get_user_pages_fast(unsigned long start, int nr_pages,
2502 			unsigned int gup_flags, struct page **pages);
2503 int pin_user_pages_fast(unsigned long start, int nr_pages,
2504 			unsigned int gup_flags, struct page **pages);
2505 void folio_add_pin(struct folio *folio);
2506 
2507 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2508 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2509 			struct task_struct *task, bool bypass_rlim);
2510 
2511 struct kvec;
2512 struct page *get_dump_page(unsigned long addr);
2513 
2514 bool folio_mark_dirty(struct folio *folio);
2515 bool set_page_dirty(struct page *page);
2516 int set_page_dirty_lock(struct page *page);
2517 
2518 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2519 
2520 extern unsigned long move_page_tables(struct vm_area_struct *vma,
2521 		unsigned long old_addr, struct vm_area_struct *new_vma,
2522 		unsigned long new_addr, unsigned long len,
2523 		bool need_rmap_locks, bool for_stack);
2524 
2525 /*
2526  * Flags used by change_protection().  For now we make it a bitmap so
2527  * that we can pass in multiple flags just like parameters.  However
2528  * for now all the callers are only use one of the flags at the same
2529  * time.
2530  */
2531 /*
2532  * Whether we should manually check if we can map individual PTEs writable,
2533  * because something (e.g., COW, uffd-wp) blocks that from happening for all
2534  * PTEs automatically in a writable mapping.
2535  */
2536 #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
2537 /* Whether this protection change is for NUMA hints */
2538 #define  MM_CP_PROT_NUMA                   (1UL << 1)
2539 /* Whether this change is for write protecting */
2540 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
2541 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
2542 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
2543 					    MM_CP_UFFD_WP_RESOLVE)
2544 
2545 bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
2546 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2547 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
2548 {
2549 	/*
2550 	 * We want to check manually if we can change individual PTEs writable
2551 	 * if we can't do that automatically for all PTEs in a mapping. For
2552 	 * private mappings, that's always the case when we have write
2553 	 * permissions as we properly have to handle COW.
2554 	 */
2555 	if (vma->vm_flags & VM_SHARED)
2556 		return vma_wants_writenotify(vma, vma->vm_page_prot);
2557 	return !!(vma->vm_flags & VM_WRITE);
2558 
2559 }
2560 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2561 			     pte_t pte);
2562 extern long change_protection(struct mmu_gather *tlb,
2563 			      struct vm_area_struct *vma, unsigned long start,
2564 			      unsigned long end, unsigned long cp_flags);
2565 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2566 	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
2567 	  unsigned long start, unsigned long end, unsigned long newflags);
2568 
2569 /*
2570  * doesn't attempt to fault and will return short.
2571  */
2572 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2573 			     unsigned int gup_flags, struct page **pages);
2574 
2575 static inline bool get_user_page_fast_only(unsigned long addr,
2576 			unsigned int gup_flags, struct page **pagep)
2577 {
2578 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2579 }
2580 /*
2581  * per-process(per-mm_struct) statistics.
2582  */
2583 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2584 {
2585 	return percpu_counter_read_positive(&mm->rss_stat[member]);
2586 }
2587 
2588 void mm_trace_rss_stat(struct mm_struct *mm, int member);
2589 
2590 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2591 {
2592 	percpu_counter_add(&mm->rss_stat[member], value);
2593 
2594 	mm_trace_rss_stat(mm, member);
2595 }
2596 
2597 static inline void inc_mm_counter(struct mm_struct *mm, int member)
2598 {
2599 	percpu_counter_inc(&mm->rss_stat[member]);
2600 
2601 	mm_trace_rss_stat(mm, member);
2602 }
2603 
2604 static inline void dec_mm_counter(struct mm_struct *mm, int member)
2605 {
2606 	percpu_counter_dec(&mm->rss_stat[member]);
2607 
2608 	mm_trace_rss_stat(mm, member);
2609 }
2610 
2611 /* Optimized variant when folio is already known not to be anon */
2612 static inline int mm_counter_file(struct folio *folio)
2613 {
2614 	if (folio_test_swapbacked(folio))
2615 		return MM_SHMEMPAGES;
2616 	return MM_FILEPAGES;
2617 }
2618 
2619 static inline int mm_counter(struct folio *folio)
2620 {
2621 	if (folio_test_anon(folio))
2622 		return MM_ANONPAGES;
2623 	return mm_counter_file(folio);
2624 }
2625 
2626 static inline unsigned long get_mm_rss(struct mm_struct *mm)
2627 {
2628 	return get_mm_counter(mm, MM_FILEPAGES) +
2629 		get_mm_counter(mm, MM_ANONPAGES) +
2630 		get_mm_counter(mm, MM_SHMEMPAGES);
2631 }
2632 
2633 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2634 {
2635 	return max(mm->hiwater_rss, get_mm_rss(mm));
2636 }
2637 
2638 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2639 {
2640 	return max(mm->hiwater_vm, mm->total_vm);
2641 }
2642 
2643 static inline void update_hiwater_rss(struct mm_struct *mm)
2644 {
2645 	unsigned long _rss = get_mm_rss(mm);
2646 
2647 	if ((mm)->hiwater_rss < _rss)
2648 		(mm)->hiwater_rss = _rss;
2649 }
2650 
2651 static inline void update_hiwater_vm(struct mm_struct *mm)
2652 {
2653 	if (mm->hiwater_vm < mm->total_vm)
2654 		mm->hiwater_vm = mm->total_vm;
2655 }
2656 
2657 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2658 {
2659 	mm->hiwater_rss = get_mm_rss(mm);
2660 }
2661 
2662 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2663 					 struct mm_struct *mm)
2664 {
2665 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2666 
2667 	if (*maxrss < hiwater_rss)
2668 		*maxrss = hiwater_rss;
2669 }
2670 
2671 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2672 static inline int pte_special(pte_t pte)
2673 {
2674 	return 0;
2675 }
2676 
2677 static inline pte_t pte_mkspecial(pte_t pte)
2678 {
2679 	return pte;
2680 }
2681 #endif
2682 
2683 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2684 static inline int pte_devmap(pte_t pte)
2685 {
2686 	return 0;
2687 }
2688 #endif
2689 
2690 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2691 			       spinlock_t **ptl);
2692 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2693 				    spinlock_t **ptl)
2694 {
2695 	pte_t *ptep;
2696 	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2697 	return ptep;
2698 }
2699 
2700 #ifdef __PAGETABLE_P4D_FOLDED
2701 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2702 						unsigned long address)
2703 {
2704 	return 0;
2705 }
2706 #else
2707 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2708 #endif
2709 
2710 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2711 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2712 						unsigned long address)
2713 {
2714 	return 0;
2715 }
2716 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2717 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2718 
2719 #else
2720 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2721 
2722 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2723 {
2724 	if (mm_pud_folded(mm))
2725 		return;
2726 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2727 }
2728 
2729 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2730 {
2731 	if (mm_pud_folded(mm))
2732 		return;
2733 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2734 }
2735 #endif
2736 
2737 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2738 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2739 						unsigned long address)
2740 {
2741 	return 0;
2742 }
2743 
2744 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2745 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2746 
2747 #else
2748 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2749 
2750 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2751 {
2752 	if (mm_pmd_folded(mm))
2753 		return;
2754 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2755 }
2756 
2757 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2758 {
2759 	if (mm_pmd_folded(mm))
2760 		return;
2761 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2762 }
2763 #endif
2764 
2765 #ifdef CONFIG_MMU
2766 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2767 {
2768 	atomic_long_set(&mm->pgtables_bytes, 0);
2769 }
2770 
2771 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2772 {
2773 	return atomic_long_read(&mm->pgtables_bytes);
2774 }
2775 
2776 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2777 {
2778 	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2779 }
2780 
2781 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2782 {
2783 	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2784 }
2785 #else
2786 
2787 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2788 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2789 {
2790 	return 0;
2791 }
2792 
2793 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2794 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2795 #endif
2796 
2797 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2798 int __pte_alloc_kernel(pmd_t *pmd);
2799 
2800 #if defined(CONFIG_MMU)
2801 
2802 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2803 		unsigned long address)
2804 {
2805 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2806 		NULL : p4d_offset(pgd, address);
2807 }
2808 
2809 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2810 		unsigned long address)
2811 {
2812 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2813 		NULL : pud_offset(p4d, address);
2814 }
2815 
2816 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2817 {
2818 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
2819 		NULL: pmd_offset(pud, address);
2820 }
2821 #endif /* CONFIG_MMU */
2822 
2823 static inline struct ptdesc *virt_to_ptdesc(const void *x)
2824 {
2825 	return page_ptdesc(virt_to_page(x));
2826 }
2827 
2828 static inline void *ptdesc_to_virt(const struct ptdesc *pt)
2829 {
2830 	return page_to_virt(ptdesc_page(pt));
2831 }
2832 
2833 static inline void *ptdesc_address(const struct ptdesc *pt)
2834 {
2835 	return folio_address(ptdesc_folio(pt));
2836 }
2837 
2838 static inline bool pagetable_is_reserved(struct ptdesc *pt)
2839 {
2840 	return folio_test_reserved(ptdesc_folio(pt));
2841 }
2842 
2843 /**
2844  * pagetable_alloc - Allocate pagetables
2845  * @gfp:    GFP flags
2846  * @order:  desired pagetable order
2847  *
2848  * pagetable_alloc allocates memory for page tables as well as a page table
2849  * descriptor to describe that memory.
2850  *
2851  * Return: The ptdesc describing the allocated page tables.
2852  */
2853 static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
2854 {
2855 	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
2856 
2857 	return page_ptdesc(page);
2858 }
2859 #define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
2860 
2861 /**
2862  * pagetable_free - Free pagetables
2863  * @pt:	The page table descriptor
2864  *
2865  * pagetable_free frees the memory of all page tables described by a page
2866  * table descriptor and the memory for the descriptor itself.
2867  */
2868 static inline void pagetable_free(struct ptdesc *pt)
2869 {
2870 	struct page *page = ptdesc_page(pt);
2871 
2872 	__free_pages(page, compound_order(page));
2873 }
2874 
2875 #if USE_SPLIT_PTE_PTLOCKS
2876 #if ALLOC_SPLIT_PTLOCKS
2877 void __init ptlock_cache_init(void);
2878 bool ptlock_alloc(struct ptdesc *ptdesc);
2879 void ptlock_free(struct ptdesc *ptdesc);
2880 
2881 static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2882 {
2883 	return ptdesc->ptl;
2884 }
2885 #else /* ALLOC_SPLIT_PTLOCKS */
2886 static inline void ptlock_cache_init(void)
2887 {
2888 }
2889 
2890 static inline bool ptlock_alloc(struct ptdesc *ptdesc)
2891 {
2892 	return true;
2893 }
2894 
2895 static inline void ptlock_free(struct ptdesc *ptdesc)
2896 {
2897 }
2898 
2899 static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2900 {
2901 	return &ptdesc->ptl;
2902 }
2903 #endif /* ALLOC_SPLIT_PTLOCKS */
2904 
2905 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2906 {
2907 	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
2908 }
2909 
2910 static inline bool ptlock_init(struct ptdesc *ptdesc)
2911 {
2912 	/*
2913 	 * prep_new_page() initialize page->private (and therefore page->ptl)
2914 	 * with 0. Make sure nobody took it in use in between.
2915 	 *
2916 	 * It can happen if arch try to use slab for page table allocation:
2917 	 * slab code uses page->slab_cache, which share storage with page->ptl.
2918 	 */
2919 	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
2920 	if (!ptlock_alloc(ptdesc))
2921 		return false;
2922 	spin_lock_init(ptlock_ptr(ptdesc));
2923 	return true;
2924 }
2925 
2926 #else	/* !USE_SPLIT_PTE_PTLOCKS */
2927 /*
2928  * We use mm->page_table_lock to guard all pagetable pages of the mm.
2929  */
2930 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2931 {
2932 	return &mm->page_table_lock;
2933 }
2934 static inline void ptlock_cache_init(void) {}
2935 static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
2936 static inline void ptlock_free(struct ptdesc *ptdesc) {}
2937 #endif /* USE_SPLIT_PTE_PTLOCKS */
2938 
2939 static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
2940 {
2941 	struct folio *folio = ptdesc_folio(ptdesc);
2942 
2943 	if (!ptlock_init(ptdesc))
2944 		return false;
2945 	__folio_set_pgtable(folio);
2946 	lruvec_stat_add_folio(folio, NR_PAGETABLE);
2947 	return true;
2948 }
2949 
2950 static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
2951 {
2952 	struct folio *folio = ptdesc_folio(ptdesc);
2953 
2954 	ptlock_free(ptdesc);
2955 	__folio_clear_pgtable(folio);
2956 	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
2957 }
2958 
2959 pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
2960 static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
2961 {
2962 	return __pte_offset_map(pmd, addr, NULL);
2963 }
2964 
2965 pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
2966 			unsigned long addr, spinlock_t **ptlp);
2967 static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
2968 			unsigned long addr, spinlock_t **ptlp)
2969 {
2970 	pte_t *pte;
2971 
2972 	__cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
2973 	return pte;
2974 }
2975 
2976 pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
2977 			unsigned long addr, spinlock_t **ptlp);
2978 
2979 #define pte_unmap_unlock(pte, ptl)	do {		\
2980 	spin_unlock(ptl);				\
2981 	pte_unmap(pte);					\
2982 } while (0)
2983 
2984 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2985 
2986 #define pte_alloc_map(mm, pmd, address)			\
2987 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2988 
2989 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
2990 	(pte_alloc(mm, pmd) ?			\
2991 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2992 
2993 #define pte_alloc_kernel(pmd, address)			\
2994 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2995 		NULL: pte_offset_kernel(pmd, address))
2996 
2997 #if USE_SPLIT_PMD_PTLOCKS
2998 
2999 static inline struct page *pmd_pgtable_page(pmd_t *pmd)
3000 {
3001 	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
3002 	return virt_to_page((void *)((unsigned long) pmd & mask));
3003 }
3004 
3005 static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
3006 {
3007 	return page_ptdesc(pmd_pgtable_page(pmd));
3008 }
3009 
3010 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3011 {
3012 	return ptlock_ptr(pmd_ptdesc(pmd));
3013 }
3014 
3015 static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
3016 {
3017 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3018 	ptdesc->pmd_huge_pte = NULL;
3019 #endif
3020 	return ptlock_init(ptdesc);
3021 }
3022 
3023 static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
3024 {
3025 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3026 	VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
3027 #endif
3028 	ptlock_free(ptdesc);
3029 }
3030 
3031 #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3032 
3033 #else
3034 
3035 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3036 {
3037 	return &mm->page_table_lock;
3038 }
3039 
3040 static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3041 static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
3042 
3043 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3044 
3045 #endif
3046 
3047 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3048 {
3049 	spinlock_t *ptl = pmd_lockptr(mm, pmd);
3050 	spin_lock(ptl);
3051 	return ptl;
3052 }
3053 
3054 static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
3055 {
3056 	struct folio *folio = ptdesc_folio(ptdesc);
3057 
3058 	if (!pmd_ptlock_init(ptdesc))
3059 		return false;
3060 	__folio_set_pgtable(folio);
3061 	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3062 	return true;
3063 }
3064 
3065 static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
3066 {
3067 	struct folio *folio = ptdesc_folio(ptdesc);
3068 
3069 	pmd_ptlock_free(ptdesc);
3070 	__folio_clear_pgtable(folio);
3071 	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3072 }
3073 
3074 /*
3075  * No scalability reason to split PUD locks yet, but follow the same pattern
3076  * as the PMD locks to make it easier if we decide to.  The VM should not be
3077  * considered ready to switch to split PUD locks yet; there may be places
3078  * which need to be converted from page_table_lock.
3079  */
3080 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3081 {
3082 	return &mm->page_table_lock;
3083 }
3084 
3085 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3086 {
3087 	spinlock_t *ptl = pud_lockptr(mm, pud);
3088 
3089 	spin_lock(ptl);
3090 	return ptl;
3091 }
3092 
3093 static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3094 {
3095 	struct folio *folio = ptdesc_folio(ptdesc);
3096 
3097 	__folio_set_pgtable(folio);
3098 	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3099 }
3100 
3101 static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
3102 {
3103 	struct folio *folio = ptdesc_folio(ptdesc);
3104 
3105 	__folio_clear_pgtable(folio);
3106 	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3107 }
3108 
3109 extern void __init pagecache_init(void);
3110 extern void free_initmem(void);
3111 
3112 /*
3113  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
3114  * into the buddy system. The freed pages will be poisoned with pattern
3115  * "poison" if it's within range [0, UCHAR_MAX].
3116  * Return pages freed into the buddy system.
3117  */
3118 extern unsigned long free_reserved_area(void *start, void *end,
3119 					int poison, const char *s);
3120 
3121 extern void adjust_managed_page_count(struct page *page, long count);
3122 
3123 extern void reserve_bootmem_region(phys_addr_t start,
3124 				   phys_addr_t end, int nid);
3125 
3126 /* Free the reserved page into the buddy system, so it gets managed. */
3127 static inline void free_reserved_page(struct page *page)
3128 {
3129 	if (mem_alloc_profiling_enabled()) {
3130 		union codetag_ref *ref = get_page_tag_ref(page);
3131 
3132 		if (ref) {
3133 			set_codetag_empty(ref);
3134 			put_page_tag_ref(ref);
3135 		}
3136 	}
3137 	ClearPageReserved(page);
3138 	init_page_count(page);
3139 	__free_page(page);
3140 	adjust_managed_page_count(page, 1);
3141 }
3142 #define free_highmem_page(page) free_reserved_page(page)
3143 
3144 static inline void mark_page_reserved(struct page *page)
3145 {
3146 	SetPageReserved(page);
3147 	adjust_managed_page_count(page, -1);
3148 }
3149 
3150 static inline void free_reserved_ptdesc(struct ptdesc *pt)
3151 {
3152 	free_reserved_page(ptdesc_page(pt));
3153 }
3154 
3155 /*
3156  * Default method to free all the __init memory into the buddy system.
3157  * The freed pages will be poisoned with pattern "poison" if it's within
3158  * range [0, UCHAR_MAX].
3159  * Return pages freed into the buddy system.
3160  */
3161 static inline unsigned long free_initmem_default(int poison)
3162 {
3163 	extern char __init_begin[], __init_end[];
3164 
3165 	return free_reserved_area(&__init_begin, &__init_end,
3166 				  poison, "unused kernel image (initmem)");
3167 }
3168 
3169 static inline unsigned long get_num_physpages(void)
3170 {
3171 	int nid;
3172 	unsigned long phys_pages = 0;
3173 
3174 	for_each_online_node(nid)
3175 		phys_pages += node_present_pages(nid);
3176 
3177 	return phys_pages;
3178 }
3179 
3180 /*
3181  * Using memblock node mappings, an architecture may initialise its
3182  * zones, allocate the backing mem_map and account for memory holes in an
3183  * architecture independent manner.
3184  *
3185  * An architecture is expected to register range of page frames backed by
3186  * physical memory with memblock_add[_node]() before calling
3187  * free_area_init() passing in the PFN each zone ends at. At a basic
3188  * usage, an architecture is expected to do something like
3189  *
3190  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3191  * 							 max_highmem_pfn};
3192  * for_each_valid_physical_page_range()
3193  *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3194  * free_area_init(max_zone_pfns);
3195  */
3196 void free_area_init(unsigned long *max_zone_pfn);
3197 unsigned long node_map_pfn_alignment(void);
3198 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3199 						unsigned long end_pfn);
3200 extern void get_pfn_range_for_nid(unsigned int nid,
3201 			unsigned long *start_pfn, unsigned long *end_pfn);
3202 
3203 #ifndef CONFIG_NUMA
3204 static inline int early_pfn_to_nid(unsigned long pfn)
3205 {
3206 	return 0;
3207 }
3208 #else
3209 /* please see mm/page_alloc.c */
3210 extern int __meminit early_pfn_to_nid(unsigned long pfn);
3211 #endif
3212 
3213 extern void mem_init(void);
3214 extern void __init mmap_init(void);
3215 
3216 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3217 static inline void show_mem(void)
3218 {
3219 	__show_mem(0, NULL, MAX_NR_ZONES - 1);
3220 }
3221 extern long si_mem_available(void);
3222 extern void si_meminfo(struct sysinfo * val);
3223 extern void si_meminfo_node(struct sysinfo *val, int nid);
3224 
3225 extern __printf(3, 4)
3226 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
3227 
3228 extern void setup_per_cpu_pageset(void);
3229 
3230 /* nommu.c */
3231 extern atomic_long_t mmap_pages_allocated;
3232 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
3233 
3234 /* interval_tree.c */
3235 void vma_interval_tree_insert(struct vm_area_struct *node,
3236 			      struct rb_root_cached *root);
3237 void vma_interval_tree_insert_after(struct vm_area_struct *node,
3238 				    struct vm_area_struct *prev,
3239 				    struct rb_root_cached *root);
3240 void vma_interval_tree_remove(struct vm_area_struct *node,
3241 			      struct rb_root_cached *root);
3242 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
3243 				unsigned long start, unsigned long last);
3244 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
3245 				unsigned long start, unsigned long last);
3246 
3247 #define vma_interval_tree_foreach(vma, root, start, last)		\
3248 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
3249 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
3250 
3251 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
3252 				   struct rb_root_cached *root);
3253 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
3254 				   struct rb_root_cached *root);
3255 struct anon_vma_chain *
3256 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
3257 				  unsigned long start, unsigned long last);
3258 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
3259 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
3260 #ifdef CONFIG_DEBUG_VM_RB
3261 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
3262 #endif
3263 
3264 #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
3265 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3266 	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
3267 
3268 /* mmap.c */
3269 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
3270 extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
3271 		      unsigned long start, unsigned long end, pgoff_t pgoff,
3272 		      struct vm_area_struct *next);
3273 extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
3274 		       unsigned long start, unsigned long end, pgoff_t pgoff);
3275 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
3276 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
3277 extern void unlink_file_vma(struct vm_area_struct *);
3278 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
3279 	unsigned long addr, unsigned long len, pgoff_t pgoff,
3280 	bool *need_rmap_locks);
3281 extern void exit_mmap(struct mm_struct *);
3282 struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
3283 				  struct vm_area_struct *prev,
3284 				  struct vm_area_struct *vma,
3285 				  unsigned long start, unsigned long end,
3286 				  unsigned long vm_flags,
3287 				  struct mempolicy *policy,
3288 				  struct vm_userfaultfd_ctx uffd_ctx,
3289 				  struct anon_vma_name *anon_name);
3290 
3291 /* We are about to modify the VMA's flags. */
3292 static inline struct vm_area_struct
3293 *vma_modify_flags(struct vma_iterator *vmi,
3294 		  struct vm_area_struct *prev,
3295 		  struct vm_area_struct *vma,
3296 		  unsigned long start, unsigned long end,
3297 		  unsigned long new_flags)
3298 {
3299 	return vma_modify(vmi, prev, vma, start, end, new_flags,
3300 			  vma_policy(vma), vma->vm_userfaultfd_ctx,
3301 			  anon_vma_name(vma));
3302 }
3303 
3304 /* We are about to modify the VMA's flags and/or anon_name. */
3305 static inline struct vm_area_struct
3306 *vma_modify_flags_name(struct vma_iterator *vmi,
3307 		       struct vm_area_struct *prev,
3308 		       struct vm_area_struct *vma,
3309 		       unsigned long start,
3310 		       unsigned long end,
3311 		       unsigned long new_flags,
3312 		       struct anon_vma_name *new_name)
3313 {
3314 	return vma_modify(vmi, prev, vma, start, end, new_flags,
3315 			  vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
3316 }
3317 
3318 /* We are about to modify the VMA's memory policy. */
3319 static inline struct vm_area_struct
3320 *vma_modify_policy(struct vma_iterator *vmi,
3321 		   struct vm_area_struct *prev,
3322 		   struct vm_area_struct *vma,
3323 		   unsigned long start, unsigned long end,
3324 		   struct mempolicy *new_pol)
3325 {
3326 	return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
3327 			  new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3328 }
3329 
3330 /* We are about to modify the VMA's flags and/or uffd context. */
3331 static inline struct vm_area_struct
3332 *vma_modify_flags_uffd(struct vma_iterator *vmi,
3333 		       struct vm_area_struct *prev,
3334 		       struct vm_area_struct *vma,
3335 		       unsigned long start, unsigned long end,
3336 		       unsigned long new_flags,
3337 		       struct vm_userfaultfd_ctx new_ctx)
3338 {
3339 	return vma_modify(vmi, prev, vma, start, end, new_flags,
3340 			  vma_policy(vma), new_ctx, anon_vma_name(vma));
3341 }
3342 
3343 static inline int check_data_rlimit(unsigned long rlim,
3344 				    unsigned long new,
3345 				    unsigned long start,
3346 				    unsigned long end_data,
3347 				    unsigned long start_data)
3348 {
3349 	if (rlim < RLIM_INFINITY) {
3350 		if (((new - start) + (end_data - start_data)) > rlim)
3351 			return -ENOSPC;
3352 	}
3353 
3354 	return 0;
3355 }
3356 
3357 extern int mm_take_all_locks(struct mm_struct *mm);
3358 extern void mm_drop_all_locks(struct mm_struct *mm);
3359 
3360 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3361 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3362 extern struct file *get_mm_exe_file(struct mm_struct *mm);
3363 extern struct file *get_task_exe_file(struct task_struct *task);
3364 
3365 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
3366 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
3367 
3368 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3369 				   const struct vm_special_mapping *sm);
3370 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3371 				   unsigned long addr, unsigned long len,
3372 				   unsigned long flags,
3373 				   const struct vm_special_mapping *spec);
3374 /* This is an obsolete alternative to _install_special_mapping. */
3375 extern int install_special_mapping(struct mm_struct *mm,
3376 				   unsigned long addr, unsigned long len,
3377 				   unsigned long flags, struct page **pages);
3378 
3379 unsigned long randomize_stack_top(unsigned long stack_top);
3380 unsigned long randomize_page(unsigned long start, unsigned long range);
3381 
3382 unsigned long
3383 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3384 		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
3385 
3386 static inline unsigned long
3387 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3388 		  unsigned long pgoff, unsigned long flags)
3389 {
3390 	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
3391 }
3392 
3393 extern unsigned long mmap_region(struct file *file, unsigned long addr,
3394 	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
3395 	struct list_head *uf);
3396 extern unsigned long do_mmap(struct file *file, unsigned long addr,
3397 	unsigned long len, unsigned long prot, unsigned long flags,
3398 	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
3399 	struct list_head *uf);
3400 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3401 			 unsigned long start, size_t len, struct list_head *uf,
3402 			 bool unlock);
3403 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
3404 		     struct list_head *uf);
3405 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3406 
3407 #ifdef CONFIG_MMU
3408 extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3409 			 unsigned long start, unsigned long end,
3410 			 struct list_head *uf, bool unlock);
3411 extern int __mm_populate(unsigned long addr, unsigned long len,
3412 			 int ignore_errors);
3413 static inline void mm_populate(unsigned long addr, unsigned long len)
3414 {
3415 	/* Ignore errors */
3416 	(void) __mm_populate(addr, len, 1);
3417 }
3418 #else
3419 static inline void mm_populate(unsigned long addr, unsigned long len) {}
3420 #endif
3421 
3422 /* This takes the mm semaphore itself */
3423 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
3424 extern int vm_munmap(unsigned long, size_t);
3425 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
3426         unsigned long, unsigned long,
3427         unsigned long, unsigned long);
3428 
3429 struct vm_unmapped_area_info {
3430 #define VM_UNMAPPED_AREA_TOPDOWN 1
3431 	unsigned long flags;
3432 	unsigned long length;
3433 	unsigned long low_limit;
3434 	unsigned long high_limit;
3435 	unsigned long align_mask;
3436 	unsigned long align_offset;
3437 	unsigned long start_gap;
3438 };
3439 
3440 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
3441 
3442 /* truncate.c */
3443 extern void truncate_inode_pages(struct address_space *, loff_t);
3444 extern void truncate_inode_pages_range(struct address_space *,
3445 				       loff_t lstart, loff_t lend);
3446 extern void truncate_inode_pages_final(struct address_space *);
3447 
3448 /* generic vm_area_ops exported for stackable file systems */
3449 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
3450 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3451 		pgoff_t start_pgoff, pgoff_t end_pgoff);
3452 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
3453 
3454 extern unsigned long stack_guard_gap;
3455 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
3456 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3457 struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3458 
3459 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
3460 int expand_downwards(struct vm_area_struct *vma, unsigned long address);
3461 
3462 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
3463 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3464 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3465 					     struct vm_area_struct **pprev);
3466 
3467 /*
3468  * Look up the first VMA which intersects the interval [start_addr, end_addr)
3469  * NULL if none.  Assume start_addr < end_addr.
3470  */
3471 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3472 			unsigned long start_addr, unsigned long end_addr);
3473 
3474 /**
3475  * vma_lookup() - Find a VMA at a specific address
3476  * @mm: The process address space.
3477  * @addr: The user address.
3478  *
3479  * Return: The vm_area_struct at the given address, %NULL otherwise.
3480  */
3481 static inline
3482 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3483 {
3484 	return mtree_load(&mm->mm_mt, addr);
3485 }
3486 
3487 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
3488 {
3489 	if (vma->vm_flags & VM_GROWSDOWN)
3490 		return stack_guard_gap;
3491 
3492 	/* See reasoning around the VM_SHADOW_STACK definition */
3493 	if (vma->vm_flags & VM_SHADOW_STACK)
3494 		return PAGE_SIZE;
3495 
3496 	return 0;
3497 }
3498 
3499 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
3500 {
3501 	unsigned long gap = stack_guard_start_gap(vma);
3502 	unsigned long vm_start = vma->vm_start;
3503 
3504 	vm_start -= gap;
3505 	if (vm_start > vma->vm_start)
3506 		vm_start = 0;
3507 	return vm_start;
3508 }
3509 
3510 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
3511 {
3512 	unsigned long vm_end = vma->vm_end;
3513 
3514 	if (vma->vm_flags & VM_GROWSUP) {
3515 		vm_end += stack_guard_gap;
3516 		if (vm_end < vma->vm_end)
3517 			vm_end = -PAGE_SIZE;
3518 	}
3519 	return vm_end;
3520 }
3521 
3522 static inline unsigned long vma_pages(struct vm_area_struct *vma)
3523 {
3524 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3525 }
3526 
3527 /* Look up the first VMA which exactly match the interval vm_start ... vm_end */
3528 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3529 				unsigned long vm_start, unsigned long vm_end)
3530 {
3531 	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3532 
3533 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
3534 		vma = NULL;
3535 
3536 	return vma;
3537 }
3538 
3539 static inline bool range_in_vma(struct vm_area_struct *vma,
3540 				unsigned long start, unsigned long end)
3541 {
3542 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
3543 }
3544 
3545 #ifdef CONFIG_MMU
3546 pgprot_t vm_get_page_prot(unsigned long vm_flags);
3547 void vma_set_page_prot(struct vm_area_struct *vma);
3548 #else
3549 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
3550 {
3551 	return __pgprot(0);
3552 }
3553 static inline void vma_set_page_prot(struct vm_area_struct *vma)
3554 {
3555 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3556 }
3557 #endif
3558 
3559 void vma_set_file(struct vm_area_struct *vma, struct file *file);
3560 
3561 #ifdef CONFIG_NUMA_BALANCING
3562 unsigned long change_prot_numa(struct vm_area_struct *vma,
3563 			unsigned long start, unsigned long end);
3564 #endif
3565 
3566 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
3567 		unsigned long addr);
3568 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
3569 			unsigned long pfn, unsigned long size, pgprot_t);
3570 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3571 		unsigned long pfn, unsigned long size, pgprot_t prot);
3572 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3573 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3574 			struct page **pages, unsigned long *num);
3575 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3576 				unsigned long num);
3577 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3578 				unsigned long num);
3579 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3580 			unsigned long pfn);
3581 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3582 			unsigned long pfn, pgprot_t pgprot);
3583 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3584 			pfn_t pfn);
3585 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3586 		unsigned long addr, pfn_t pfn);
3587 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3588 
3589 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3590 				unsigned long addr, struct page *page)
3591 {
3592 	int err = vm_insert_page(vma, addr, page);
3593 
3594 	if (err == -ENOMEM)
3595 		return VM_FAULT_OOM;
3596 	if (err < 0 && err != -EBUSY)
3597 		return VM_FAULT_SIGBUS;
3598 
3599 	return VM_FAULT_NOPAGE;
3600 }
3601 
3602 #ifndef io_remap_pfn_range
3603 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3604 				     unsigned long addr, unsigned long pfn,
3605 				     unsigned long size, pgprot_t prot)
3606 {
3607 	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
3608 }
3609 #endif
3610 
3611 static inline vm_fault_t vmf_error(int err)
3612 {
3613 	if (err == -ENOMEM)
3614 		return VM_FAULT_OOM;
3615 	else if (err == -EHWPOISON)
3616 		return VM_FAULT_HWPOISON;
3617 	return VM_FAULT_SIGBUS;
3618 }
3619 
3620 /*
3621  * Convert errno to return value for ->page_mkwrite() calls.
3622  *
3623  * This should eventually be merged with vmf_error() above, but will need a
3624  * careful audit of all vmf_error() callers.
3625  */
3626 static inline vm_fault_t vmf_fs_error(int err)
3627 {
3628 	if (err == 0)
3629 		return VM_FAULT_LOCKED;
3630 	if (err == -EFAULT || err == -EAGAIN)
3631 		return VM_FAULT_NOPAGE;
3632 	if (err == -ENOMEM)
3633 		return VM_FAULT_OOM;
3634 	/* -ENOSPC, -EDQUOT, -EIO ... */
3635 	return VM_FAULT_SIGBUS;
3636 }
3637 
3638 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
3639 			 unsigned int foll_flags);
3640 
3641 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
3642 {
3643 	if (vm_fault & VM_FAULT_OOM)
3644 		return -ENOMEM;
3645 	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3646 		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
3647 	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3648 		return -EFAULT;
3649 	return 0;
3650 }
3651 
3652 /*
3653  * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
3654  * a (NUMA hinting) fault is required.
3655  */
3656 static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
3657 					   unsigned int flags)
3658 {
3659 	/*
3660 	 * If callers don't want to honor NUMA hinting faults, no need to
3661 	 * determine if we would actually have to trigger a NUMA hinting fault.
3662 	 */
3663 	if (!(flags & FOLL_HONOR_NUMA_FAULT))
3664 		return true;
3665 
3666 	/*
3667 	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
3668 	 *
3669 	 * Requiring a fault here even for inaccessible VMAs would mean that
3670 	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
3671 	 * refuses to process NUMA hinting faults in inaccessible VMAs.
3672 	 */
3673 	return !vma_is_accessible(vma);
3674 }
3675 
3676 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3677 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3678 			       unsigned long size, pte_fn_t fn, void *data);
3679 extern int apply_to_existing_page_range(struct mm_struct *mm,
3680 				   unsigned long address, unsigned long size,
3681 				   pte_fn_t fn, void *data);
3682 
3683 #ifdef CONFIG_PAGE_POISONING
3684 extern void __kernel_poison_pages(struct page *page, int numpages);
3685 extern void __kernel_unpoison_pages(struct page *page, int numpages);
3686 extern bool _page_poisoning_enabled_early;
3687 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3688 static inline bool page_poisoning_enabled(void)
3689 {
3690 	return _page_poisoning_enabled_early;
3691 }
3692 /*
3693  * For use in fast paths after init_mem_debugging() has run, or when a
3694  * false negative result is not harmful when called too early.
3695  */
3696 static inline bool page_poisoning_enabled_static(void)
3697 {
3698 	return static_branch_unlikely(&_page_poisoning_enabled);
3699 }
3700 static inline void kernel_poison_pages(struct page *page, int numpages)
3701 {
3702 	if (page_poisoning_enabled_static())
3703 		__kernel_poison_pages(page, numpages);
3704 }
3705 static inline void kernel_unpoison_pages(struct page *page, int numpages)
3706 {
3707 	if (page_poisoning_enabled_static())
3708 		__kernel_unpoison_pages(page, numpages);
3709 }
3710 #else
3711 static inline bool page_poisoning_enabled(void) { return false; }
3712 static inline bool page_poisoning_enabled_static(void) { return false; }
3713 static inline void __kernel_poison_pages(struct page *page, int nunmpages) { }
3714 static inline void kernel_poison_pages(struct page *page, int numpages) { }
3715 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3716 #endif
3717 
3718 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3719 static inline bool want_init_on_alloc(gfp_t flags)
3720 {
3721 	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3722 				&init_on_alloc))
3723 		return true;
3724 	return flags & __GFP_ZERO;
3725 }
3726 
3727 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3728 static inline bool want_init_on_free(void)
3729 {
3730 	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3731 				   &init_on_free);
3732 }
3733 
3734 extern bool _debug_pagealloc_enabled_early;
3735 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3736 
3737 static inline bool debug_pagealloc_enabled(void)
3738 {
3739 	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3740 		_debug_pagealloc_enabled_early;
3741 }
3742 
3743 /*
3744  * For use in fast paths after mem_debugging_and_hardening_init() has run,
3745  * or when a false negative result is not harmful when called too early.
3746  */
3747 static inline bool debug_pagealloc_enabled_static(void)
3748 {
3749 	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3750 		return false;
3751 
3752 	return static_branch_unlikely(&_debug_pagealloc_enabled);
3753 }
3754 
3755 /*
3756  * To support DEBUG_PAGEALLOC architecture must ensure that
3757  * __kernel_map_pages() never fails
3758  */
3759 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3760 #ifdef CONFIG_DEBUG_PAGEALLOC
3761 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3762 {
3763 	if (debug_pagealloc_enabled_static())
3764 		__kernel_map_pages(page, numpages, 1);
3765 }
3766 
3767 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3768 {
3769 	if (debug_pagealloc_enabled_static())
3770 		__kernel_map_pages(page, numpages, 0);
3771 }
3772 
3773 extern unsigned int _debug_guardpage_minorder;
3774 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3775 
3776 static inline unsigned int debug_guardpage_minorder(void)
3777 {
3778 	return _debug_guardpage_minorder;
3779 }
3780 
3781 static inline bool debug_guardpage_enabled(void)
3782 {
3783 	return static_branch_unlikely(&_debug_guardpage_enabled);
3784 }
3785 
3786 static inline bool page_is_guard(struct page *page)
3787 {
3788 	if (!debug_guardpage_enabled())
3789 		return false;
3790 
3791 	return PageGuard(page);
3792 }
3793 
3794 bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
3795 static inline bool set_page_guard(struct zone *zone, struct page *page,
3796 				  unsigned int order)
3797 {
3798 	if (!debug_guardpage_enabled())
3799 		return false;
3800 	return __set_page_guard(zone, page, order);
3801 }
3802 
3803 void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
3804 static inline void clear_page_guard(struct zone *zone, struct page *page,
3805 				    unsigned int order)
3806 {
3807 	if (!debug_guardpage_enabled())
3808 		return;
3809 	__clear_page_guard(zone, page, order);
3810 }
3811 
3812 #else	/* CONFIG_DEBUG_PAGEALLOC */
3813 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3814 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3815 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3816 static inline bool debug_guardpage_enabled(void) { return false; }
3817 static inline bool page_is_guard(struct page *page) { return false; }
3818 static inline bool set_page_guard(struct zone *zone, struct page *page,
3819 			unsigned int order) { return false; }
3820 static inline void clear_page_guard(struct zone *zone, struct page *page,
3821 				unsigned int order) {}
3822 #endif	/* CONFIG_DEBUG_PAGEALLOC */
3823 
3824 #ifdef __HAVE_ARCH_GATE_AREA
3825 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3826 extern int in_gate_area_no_mm(unsigned long addr);
3827 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3828 #else
3829 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3830 {
3831 	return NULL;
3832 }
3833 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3834 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3835 {
3836 	return 0;
3837 }
3838 #endif	/* __HAVE_ARCH_GATE_AREA */
3839 
3840 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3841 
3842 #ifdef CONFIG_SYSCTL
3843 extern int sysctl_drop_caches;
3844 int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
3845 		loff_t *);
3846 #endif
3847 
3848 void drop_slab(void);
3849 
3850 #ifndef CONFIG_MMU
3851 #define randomize_va_space 0
3852 #else
3853 extern int randomize_va_space;
3854 #endif
3855 
3856 const char * arch_vma_name(struct vm_area_struct *vma);
3857 #ifdef CONFIG_MMU
3858 void print_vma_addr(char *prefix, unsigned long rip);
3859 #else
3860 static inline void print_vma_addr(char *prefix, unsigned long rip)
3861 {
3862 }
3863 #endif
3864 
3865 void *sparse_buffer_alloc(unsigned long size);
3866 struct page * __populate_section_memmap(unsigned long pfn,
3867 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3868 		struct dev_pagemap *pgmap);
3869 void pmd_init(void *addr);
3870 void pud_init(void *addr);
3871 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3872 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3873 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3874 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3875 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3876 			    struct vmem_altmap *altmap, struct page *reuse);
3877 void *vmemmap_alloc_block(unsigned long size, int node);
3878 struct vmem_altmap;
3879 void *vmemmap_alloc_block_buf(unsigned long size, int node,
3880 			      struct vmem_altmap *altmap);
3881 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3882 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
3883 		     unsigned long addr, unsigned long next);
3884 int vmemmap_check_pmd(pmd_t *pmd, int node,
3885 		      unsigned long addr, unsigned long next);
3886 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3887 			       int node, struct vmem_altmap *altmap);
3888 int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
3889 			       int node, struct vmem_altmap *altmap);
3890 int vmemmap_populate(unsigned long start, unsigned long end, int node,
3891 		struct vmem_altmap *altmap);
3892 void vmemmap_populate_print_last(void);
3893 #ifdef CONFIG_MEMORY_HOTPLUG
3894 void vmemmap_free(unsigned long start, unsigned long end,
3895 		struct vmem_altmap *altmap);
3896 #endif
3897 
3898 #ifdef CONFIG_SPARSEMEM_VMEMMAP
3899 static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3900 {
3901 	/* number of pfns from base where pfn_to_page() is valid */
3902 	if (altmap)
3903 		return altmap->reserve + altmap->free;
3904 	return 0;
3905 }
3906 
3907 static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3908 				    unsigned long nr_pfns)
3909 {
3910 	altmap->alloc -= nr_pfns;
3911 }
3912 #else
3913 static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3914 {
3915 	return 0;
3916 }
3917 
3918 static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3919 				    unsigned long nr_pfns)
3920 {
3921 }
3922 #endif
3923 
3924 #define VMEMMAP_RESERVE_NR	2
3925 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
3926 static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
3927 					  struct dev_pagemap *pgmap)
3928 {
3929 	unsigned long nr_pages;
3930 	unsigned long nr_vmemmap_pages;
3931 
3932 	if (!pgmap || !is_power_of_2(sizeof(struct page)))
3933 		return false;
3934 
3935 	nr_pages = pgmap_vmemmap_nr(pgmap);
3936 	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
3937 	/*
3938 	 * For vmemmap optimization with DAX we need minimum 2 vmemmap
3939 	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
3940 	 */
3941 	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
3942 }
3943 /*
3944  * If we don't have an architecture override, use the generic rule
3945  */
3946 #ifndef vmemmap_can_optimize
3947 #define vmemmap_can_optimize __vmemmap_can_optimize
3948 #endif
3949 
3950 #else
3951 static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
3952 					   struct dev_pagemap *pgmap)
3953 {
3954 	return false;
3955 }
3956 #endif
3957 
3958 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3959 				  unsigned long nr_pages);
3960 
3961 enum mf_flags {
3962 	MF_COUNT_INCREASED = 1 << 0,
3963 	MF_ACTION_REQUIRED = 1 << 1,
3964 	MF_MUST_KILL = 1 << 2,
3965 	MF_SOFT_OFFLINE = 1 << 3,
3966 	MF_UNPOISON = 1 << 4,
3967 	MF_SW_SIMULATED = 1 << 5,
3968 	MF_NO_RETRY = 1 << 6,
3969 	MF_MEM_PRE_REMOVE = 1 << 7,
3970 };
3971 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
3972 		      unsigned long count, int mf_flags);
3973 extern int memory_failure(unsigned long pfn, int flags);
3974 extern void memory_failure_queue_kick(int cpu);
3975 extern int unpoison_memory(unsigned long pfn);
3976 extern atomic_long_t num_poisoned_pages __read_mostly;
3977 extern int soft_offline_page(unsigned long pfn, int flags);
3978 #ifdef CONFIG_MEMORY_FAILURE
3979 /*
3980  * Sysfs entries for memory failure handling statistics.
3981  */
3982 extern const struct attribute_group memory_failure_attr_group;
3983 extern void memory_failure_queue(unsigned long pfn, int flags);
3984 extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3985 					bool *migratable_cleared);
3986 void num_poisoned_pages_inc(unsigned long pfn);
3987 void num_poisoned_pages_sub(unsigned long pfn, long i);
3988 #else
3989 static inline void memory_failure_queue(unsigned long pfn, int flags)
3990 {
3991 }
3992 
3993 static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3994 					bool *migratable_cleared)
3995 {
3996 	return 0;
3997 }
3998 
3999 static inline void num_poisoned_pages_inc(unsigned long pfn)
4000 {
4001 }
4002 
4003 static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
4004 {
4005 }
4006 #endif
4007 
4008 #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
4009 extern void memblk_nr_poison_inc(unsigned long pfn);
4010 extern void memblk_nr_poison_sub(unsigned long pfn, long i);
4011 #else
4012 static inline void memblk_nr_poison_inc(unsigned long pfn)
4013 {
4014 }
4015 
4016 static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
4017 {
4018 }
4019 #endif
4020 
4021 #ifndef arch_memory_failure
4022 static inline int arch_memory_failure(unsigned long pfn, int flags)
4023 {
4024 	return -ENXIO;
4025 }
4026 #endif
4027 
4028 #ifndef arch_is_platform_page
4029 static inline bool arch_is_platform_page(u64 paddr)
4030 {
4031 	return false;
4032 }
4033 #endif
4034 
4035 /*
4036  * Error handlers for various types of pages.
4037  */
4038 enum mf_result {
4039 	MF_IGNORED,	/* Error: cannot be handled */
4040 	MF_FAILED,	/* Error: handling failed */
4041 	MF_DELAYED,	/* Will be handled later */
4042 	MF_RECOVERED,	/* Successfully recovered */
4043 };
4044 
4045 enum mf_action_page_type {
4046 	MF_MSG_KERNEL,
4047 	MF_MSG_KERNEL_HIGH_ORDER,
4048 	MF_MSG_DIFFERENT_COMPOUND,
4049 	MF_MSG_HUGE,
4050 	MF_MSG_FREE_HUGE,
4051 	MF_MSG_GET_HWPOISON,
4052 	MF_MSG_UNMAP_FAILED,
4053 	MF_MSG_DIRTY_SWAPCACHE,
4054 	MF_MSG_CLEAN_SWAPCACHE,
4055 	MF_MSG_DIRTY_MLOCKED_LRU,
4056 	MF_MSG_CLEAN_MLOCKED_LRU,
4057 	MF_MSG_DIRTY_UNEVICTABLE_LRU,
4058 	MF_MSG_CLEAN_UNEVICTABLE_LRU,
4059 	MF_MSG_DIRTY_LRU,
4060 	MF_MSG_CLEAN_LRU,
4061 	MF_MSG_TRUNCATED_LRU,
4062 	MF_MSG_BUDDY,
4063 	MF_MSG_DAX,
4064 	MF_MSG_UNSPLIT_THP,
4065 	MF_MSG_ALREADY_POISONED,
4066 	MF_MSG_UNKNOWN,
4067 };
4068 
4069 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4070 void folio_zero_user(struct folio *folio, unsigned long addr_hint);
4071 int copy_user_large_folio(struct folio *dst, struct folio *src,
4072 			  unsigned long addr_hint,
4073 			  struct vm_area_struct *vma);
4074 long copy_folio_from_user(struct folio *dst_folio,
4075 			   const void __user *usr_src,
4076 			   bool allow_pagefault);
4077 
4078 /**
4079  * vma_is_special_huge - Are transhuge page-table entries considered special?
4080  * @vma: Pointer to the struct vm_area_struct to consider
4081  *
4082  * Whether transhuge page-table entries are considered "special" following
4083  * the definition in vm_normal_page().
4084  *
4085  * Return: true if transhuge page-table entries should be considered special,
4086  * false otherwise.
4087  */
4088 static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
4089 {
4090 	return vma_is_dax(vma) || (vma->vm_file &&
4091 				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
4092 }
4093 
4094 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4095 
4096 #if MAX_NUMNODES > 1
4097 void __init setup_nr_node_ids(void);
4098 #else
4099 static inline void setup_nr_node_ids(void) {}
4100 #endif
4101 
4102 extern int memcmp_pages(struct page *page1, struct page *page2);
4103 
4104 static inline int pages_identical(struct page *page1, struct page *page2)
4105 {
4106 	return !memcmp_pages(page1, page2);
4107 }
4108 
4109 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
4110 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
4111 						pgoff_t first_index, pgoff_t nr,
4112 						pgoff_t bitmap_pgoff,
4113 						unsigned long *bitmap,
4114 						pgoff_t *start,
4115 						pgoff_t *end);
4116 
4117 unsigned long wp_shared_mapping_range(struct address_space *mapping,
4118 				      pgoff_t first_index, pgoff_t nr);
4119 #endif
4120 
4121 extern int sysctl_nr_trim_pages;
4122 
4123 #ifdef CONFIG_PRINTK
4124 void mem_dump_obj(void *object);
4125 #else
4126 static inline void mem_dump_obj(void *object) {}
4127 #endif
4128 
4129 /**
4130  * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
4131  *                    handle them.
4132  * @seals: the seals to check
4133  * @vma: the vma to operate on
4134  *
4135  * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
4136  * check/handling on the vma flags.  Return 0 if check pass, or <0 for errors.
4137  */
4138 static inline int seal_check_write(int seals, struct vm_area_struct *vma)
4139 {
4140 	if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
4141 		/*
4142 		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
4143 		 * write seals are active.
4144 		 */
4145 		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
4146 			return -EPERM;
4147 
4148 		/*
4149 		 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
4150 		 * MAP_SHARED and read-only, take care to not allow mprotect to
4151 		 * revert protections on such mappings. Do this only for shared
4152 		 * mappings. For private mappings, don't need to mask
4153 		 * VM_MAYWRITE as we still want them to be COW-writable.
4154 		 */
4155 		if (vma->vm_flags & VM_SHARED)
4156 			vm_flags_clear(vma, VM_MAYWRITE);
4157 	}
4158 
4159 	return 0;
4160 }
4161 
4162 #ifdef CONFIG_ANON_VMA_NAME
4163 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4164 			  unsigned long len_in,
4165 			  struct anon_vma_name *anon_name);
4166 #else
4167 static inline int
4168 madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4169 		      unsigned long len_in, struct anon_vma_name *anon_name) {
4170 	return 0;
4171 }
4172 #endif
4173 
4174 #ifdef CONFIG_UNACCEPTED_MEMORY
4175 
4176 bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
4177 void accept_memory(phys_addr_t start, phys_addr_t end);
4178 
4179 #else
4180 
4181 static inline bool range_contains_unaccepted_memory(phys_addr_t start,
4182 						    phys_addr_t end)
4183 {
4184 	return false;
4185 }
4186 
4187 static inline void accept_memory(phys_addr_t start, phys_addr_t end)
4188 {
4189 }
4190 
4191 #endif
4192 
4193 static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
4194 {
4195 	phys_addr_t paddr = pfn << PAGE_SHIFT;
4196 
4197 	return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
4198 }
4199 
4200 void vma_pgtable_walk_begin(struct vm_area_struct *vma);
4201 void vma_pgtable_walk_end(struct vm_area_struct *vma);
4202 
4203 #endif /* _LINUX_MM_H */
4204