1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _LINUX_MM_H 3 #define _LINUX_MM_H 4 5 #include <linux/errno.h> 6 #include <linux/mmdebug.h> 7 #include <linux/gfp.h> 8 #include <linux/bug.h> 9 #include <linux/list.h> 10 #include <linux/mmzone.h> 11 #include <linux/rbtree.h> 12 #include <linux/atomic.h> 13 #include <linux/debug_locks.h> 14 #include <linux/mm_types.h> 15 #include <linux/mmap_lock.h> 16 #include <linux/range.h> 17 #include <linux/pfn.h> 18 #include <linux/percpu-refcount.h> 19 #include <linux/bit_spinlock.h> 20 #include <linux/shrinker.h> 21 #include <linux/resource.h> 22 #include <linux/page_ext.h> 23 #include <linux/err.h> 24 #include <linux/page-flags.h> 25 #include <linux/page_ref.h> 26 #include <linux/overflow.h> 27 #include <linux/sizes.h> 28 #include <linux/sched.h> 29 #include <linux/pgtable.h> 30 #include <linux/kasan.h> 31 #include <linux/memremap.h> 32 33 struct mempolicy; 34 struct anon_vma; 35 struct anon_vma_chain; 36 struct user_struct; 37 struct pt_regs; 38 39 extern int sysctl_page_lock_unfairness; 40 41 void init_mm_internals(void); 42 43 #ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ 44 extern unsigned long max_mapnr; 45 46 static inline void set_max_mapnr(unsigned long limit) 47 { 48 max_mapnr = limit; 49 } 50 #else 51 static inline void set_max_mapnr(unsigned long limit) { } 52 #endif 53 54 extern atomic_long_t _totalram_pages; 55 static inline unsigned long totalram_pages(void) 56 { 57 return (unsigned long)atomic_long_read(&_totalram_pages); 58 } 59 60 static inline void totalram_pages_inc(void) 61 { 62 atomic_long_inc(&_totalram_pages); 63 } 64 65 static inline void totalram_pages_dec(void) 66 { 67 atomic_long_dec(&_totalram_pages); 68 } 69 70 static inline void totalram_pages_add(long count) 71 { 72 atomic_long_add(count, &_totalram_pages); 73 } 74 75 extern void * high_memory; 76 extern int page_cluster; 77 extern const int page_cluster_max; 78 79 #ifdef CONFIG_SYSCTL 80 extern int sysctl_legacy_va_layout; 81 #else 82 #define sysctl_legacy_va_layout 0 83 #endif 84 85 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS 86 extern const int mmap_rnd_bits_min; 87 extern const int mmap_rnd_bits_max; 88 extern int mmap_rnd_bits __read_mostly; 89 #endif 90 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS 91 extern const int mmap_rnd_compat_bits_min; 92 extern const int mmap_rnd_compat_bits_max; 93 extern int mmap_rnd_compat_bits __read_mostly; 94 #endif 95 96 #include <asm/page.h> 97 #include <asm/processor.h> 98 99 /* 100 * Architectures that support memory tagging (assigning tags to memory regions, 101 * embedding these tags into addresses that point to these memory regions, and 102 * checking that the memory and the pointer tags match on memory accesses) 103 * redefine this macro to strip tags from pointers. 104 * It's defined as noop for architectures that don't support memory tagging. 105 */ 106 #ifndef untagged_addr 107 #define untagged_addr(addr) (addr) 108 #endif 109 110 #ifndef __pa_symbol 111 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 112 #endif 113 114 #ifndef page_to_virt 115 #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) 116 #endif 117 118 #ifndef lm_alias 119 #define lm_alias(x) __va(__pa_symbol(x)) 120 #endif 121 122 /* 123 * To prevent common memory management code establishing 124 * a zero page mapping on a read fault. 125 * This macro should be defined within <asm/pgtable.h>. 126 * s390 does this to prevent multiplexing of hardware bits 127 * related to the physical page in case of virtualization. 
128 */ 129 #ifndef mm_forbids_zeropage 130 #define mm_forbids_zeropage(X) (0) 131 #endif 132 133 /* 134 * On some architectures it is expensive to call memset() for small sizes. 135 * If an architecture decides to implement their own version of 136 * mm_zero_struct_page they should wrap the defines below in a #ifndef and 137 * define their own version of this macro in <asm/pgtable.h> 138 */ 139 #if BITS_PER_LONG == 64 140 /* This function must be updated when the size of struct page grows above 80 141 * or reduces below 56. The idea that compiler optimizes out switch() 142 * statement, and only leaves move/store instructions. Also the compiler can 143 * combine write statements if they are both assignments and can be reordered, 144 * this can result in several of the writes here being dropped. 145 */ 146 #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) 147 static inline void __mm_zero_struct_page(struct page *page) 148 { 149 unsigned long *_pp = (void *)page; 150 151 /* Check that struct page is either 56, 64, 72, or 80 bytes */ 152 BUILD_BUG_ON(sizeof(struct page) & 7); 153 BUILD_BUG_ON(sizeof(struct page) < 56); 154 BUILD_BUG_ON(sizeof(struct page) > 80); 155 156 switch (sizeof(struct page)) { 157 case 80: 158 _pp[9] = 0; 159 fallthrough; 160 case 72: 161 _pp[8] = 0; 162 fallthrough; 163 case 64: 164 _pp[7] = 0; 165 fallthrough; 166 case 56: 167 _pp[6] = 0; 168 _pp[5] = 0; 169 _pp[4] = 0; 170 _pp[3] = 0; 171 _pp[2] = 0; 172 _pp[1] = 0; 173 _pp[0] = 0; 174 } 175 } 176 #else 177 #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page))) 178 #endif 179 180 /* 181 * Default maximum number of active map areas, this limits the number of vmas 182 * per mm struct. Users can overwrite this number by sysctl but there is a 183 * problem. 184 * 185 * When a program's coredump is generated as ELF format, a section is created 186 * per a vma. In ELF, the number of sections is represented in unsigned short. 187 * This means the number of sections should be smaller than 65535 at coredump. 188 * Because the kernel adds some informative sections to a image of program at 189 * generating coredump, we need some margin. The number of extra sections is 190 * 1-3 now and depends on arch. We use "5" as safe margin, here. 191 * 192 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is 193 * not a hard limit any more. Although some userspace tools can be surprised by 194 * that. 
195 */ 196 #define MAPCOUNT_ELF_CORE_MARGIN (5) 197 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) 198 199 extern int sysctl_max_map_count; 200 201 extern unsigned long sysctl_user_reserve_kbytes; 202 extern unsigned long sysctl_admin_reserve_kbytes; 203 204 extern int sysctl_overcommit_memory; 205 extern int sysctl_overcommit_ratio; 206 extern unsigned long sysctl_overcommit_kbytes; 207 208 int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, 209 loff_t *); 210 int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, 211 loff_t *); 212 int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, 213 loff_t *); 214 215 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 216 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) 217 #define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio)) 218 #else 219 #define nth_page(page,n) ((page) + (n)) 220 #define folio_page_idx(folio, p) ((p) - &(folio)->page) 221 #endif 222 223 /* to align the pointer to the (next) page boundary */ 224 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) 225 226 /* to align the pointer to the (prev) page boundary */ 227 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE) 228 229 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ 230 #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) 231 232 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) 233 static inline struct folio *lru_to_folio(struct list_head *head) 234 { 235 return list_entry((head)->prev, struct folio, lru); 236 } 237 238 void setup_initial_init_mm(void *start_code, void *end_code, 239 void *end_data, void *brk); 240 241 /* 242 * Linux kernel virtual memory manager primitives. 243 * The idea being to have a "virtual" mm in the same way 244 * we have a virtual fs - giving a cleaner interface to the 245 * mm details, and allowing different kinds of memory mappings 246 * (from shared memory to executable loading to arbitrary 247 * mmap() functions). 248 */ 249 250 struct vm_area_struct *vm_area_alloc(struct mm_struct *); 251 struct vm_area_struct *vm_area_dup(struct vm_area_struct *); 252 void vm_area_free(struct vm_area_struct *); 253 254 #ifndef CONFIG_MMU 255 extern struct rb_root nommu_region_tree; 256 extern struct rw_semaphore nommu_region_sem; 257 258 extern unsigned int kobjsize(const void *objp); 259 #endif 260 261 /* 262 * vm_flags in vm_area_struct, see mm_types.h. 263 * When changing, update also include/trace/events/mmflags.h 264 */ 265 #define VM_NONE 0x00000000 266 267 #define VM_READ 0x00000001 /* currently active flags */ 268 #define VM_WRITE 0x00000002 269 #define VM_EXEC 0x00000004 270 #define VM_SHARED 0x00000008 271 272 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. 
*/ 273 #define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ 274 #define VM_MAYWRITE 0x00000020 275 #define VM_MAYEXEC 0x00000040 276 #define VM_MAYSHARE 0x00000080 277 278 #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 279 #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ 280 #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 281 #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ 282 283 #define VM_LOCKED 0x00002000 284 #define VM_IO 0x00004000 /* Memory mapped I/O or similar */ 285 286 /* Used by sys_madvise() */ 287 #define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ 288 #define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ 289 290 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ 291 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ 292 #define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ 293 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ 294 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ 295 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 296 #define VM_SYNC 0x00800000 /* Synchronous page faults */ 297 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ 298 #define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */ 299 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ 300 301 #ifdef CONFIG_MEM_SOFT_DIRTY 302 # define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */ 303 #else 304 # define VM_SOFTDIRTY 0 305 #endif 306 307 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ 308 #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ 309 #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ 310 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ 311 312 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS 313 #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ 314 #define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */ 315 #define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */ 316 #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ 317 #define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ 318 #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) 319 #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) 320 #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) 321 #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) 322 #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) 323 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ 324 325 #ifdef CONFIG_ARCH_HAS_PKEYS 326 # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 327 # define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ 328 # define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */ 329 # define VM_PKEY_BIT2 VM_HIGH_ARCH_2 330 # define VM_PKEY_BIT3 VM_HIGH_ARCH_3 331 #ifdef CONFIG_PPC 332 # define VM_PKEY_BIT4 VM_HIGH_ARCH_4 333 #else 334 # define VM_PKEY_BIT4 0 335 #endif 336 #endif /* CONFIG_ARCH_HAS_PKEYS */ 337 338 #if defined(CONFIG_X86) 339 # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ 340 #elif defined(CONFIG_PPC) 341 # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ 342 #elif defined(CONFIG_PARISC) 343 # define VM_GROWSUP VM_ARCH_1 344 #elif defined(CONFIG_IA64) 345 # define VM_GROWSUP VM_ARCH_1 346 #elif defined(CONFIG_SPARC64) 347 # define 
VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ 348 # define VM_ARCH_CLEAR VM_SPARC_ADI 349 #elif defined(CONFIG_ARM64) 350 # define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */ 351 # define VM_ARCH_CLEAR VM_ARM64_BTI 352 #elif !defined(CONFIG_MMU) 353 # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ 354 #endif 355 356 #if defined(CONFIG_ARM64_MTE) 357 # define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */ 358 # define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */ 359 #else 360 # define VM_MTE VM_NONE 361 # define VM_MTE_ALLOWED VM_NONE 362 #endif 363 364 #ifndef VM_GROWSUP 365 # define VM_GROWSUP VM_NONE 366 #endif 367 368 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 369 # define VM_UFFD_MINOR_BIT 37 370 # define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ 371 #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ 372 # define VM_UFFD_MINOR VM_NONE 373 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ 374 375 /* Bits set in the VMA until the stack is in its final location */ 376 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) 377 378 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) 379 380 /* Common data flag combinations */ 381 #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ 382 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 383 #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ 384 VM_MAYWRITE | VM_MAYEXEC) 385 #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ 386 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 387 388 #ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ 389 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC 390 #endif 391 392 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 393 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 394 #endif 395 396 #ifdef CONFIG_STACK_GROWSUP 397 #define VM_STACK VM_GROWSUP 398 #else 399 #define VM_STACK VM_GROWSDOWN 400 #endif 401 402 #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 403 404 /* VMA basic access permission flags */ 405 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) 406 407 408 /* 409 * Special vmas that are non-mergable, non-mlock()able. 410 */ 411 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 412 413 /* This mask prevents VMA from being scanned with khugepaged */ 414 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) 415 416 /* This mask defines which mm->def_flags a process can inherit its parent */ 417 #define VM_INIT_DEF_MASK VM_NOHUGEPAGE 418 419 /* This mask is used to clear all the VMA flags used by mlock */ 420 #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) 421 422 /* Arch-specific flags to clear when updating VM flags on protection change */ 423 #ifndef VM_ARCH_CLEAR 424 # define VM_ARCH_CLEAR VM_NONE 425 #endif 426 #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) 427 428 /* 429 * mapping from the currently active vm_flags protection bits (the 430 * low four bits) to a page protection mask.. 431 */ 432 433 /* 434 * The default fault flags that should be used by most of the 435 * arch-specific page fault handlers. 436 */ 437 #define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \ 438 FAULT_FLAG_KILLABLE | \ 439 FAULT_FLAG_INTERRUPTIBLE) 440 441 /** 442 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time 443 * @flags: Fault flags. 
444 * 445 * This is mostly used for places where we want to try to avoid taking 446 * the mmap_lock for too long a time when waiting for another condition 447 * to change, in which case we can try to be polite to release the 448 * mmap_lock in the first round to avoid potential starvation of other 449 * processes that would also want the mmap_lock. 450 * 451 * Return: true if the page fault allows retry and this is the first 452 * attempt of the fault handling; false otherwise. 453 */ 454 static inline bool fault_flag_allow_retry_first(enum fault_flag flags) 455 { 456 return (flags & FAULT_FLAG_ALLOW_RETRY) && 457 (!(flags & FAULT_FLAG_TRIED)); 458 } 459 460 #define FAULT_FLAG_TRACE \ 461 { FAULT_FLAG_WRITE, "WRITE" }, \ 462 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ 463 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ 464 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ 465 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ 466 { FAULT_FLAG_TRIED, "TRIED" }, \ 467 { FAULT_FLAG_USER, "USER" }, \ 468 { FAULT_FLAG_REMOTE, "REMOTE" }, \ 469 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ 470 { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } 471 472 /* 473 * vm_fault is filled by the pagefault handler and passed to the vma's 474 * ->fault function. The vma's ->fault is responsible for returning a bitmask 475 * of VM_FAULT_xxx flags that give details about how the fault was handled. 476 * 477 * MM layer fills up gfp_mask for page allocations but fault handler might 478 * alter it if its implementation requires a different allocation context. 479 * 480 * pgoff should be used in favour of virtual_address, if possible. 481 */ 482 struct vm_fault { 483 const struct { 484 struct vm_area_struct *vma; /* Target VMA */ 485 gfp_t gfp_mask; /* gfp mask to be used for allocations */ 486 pgoff_t pgoff; /* Logical page offset based on vma */ 487 unsigned long address; /* Faulting virtual address - masked */ 488 unsigned long real_address; /* Faulting virtual address - unmasked */ 489 }; 490 enum fault_flag flags; /* FAULT_FLAG_xxx flags 491 * XXX: should really be 'const' */ 492 pmd_t *pmd; /* Pointer to pmd entry matching 493 * the 'address' */ 494 pud_t *pud; /* Pointer to pud entry matching 495 * the 'address' 496 */ 497 union { 498 pte_t orig_pte; /* Value of PTE at the time of fault */ 499 pmd_t orig_pmd; /* Value of PMD at the time of fault, 500 * used by PMD fault only. 501 */ 502 }; 503 504 struct page *cow_page; /* Page handler may use for COW fault */ 505 struct page *page; /* ->fault handlers should return a 506 * page here, unless VM_FAULT_NOPAGE 507 * is set (which is also implied by 508 * VM_FAULT_ERROR). 509 */ 510 /* These three entries are valid only while holding ptl lock */ 511 pte_t *pte; /* Pointer to pte entry matching 512 * the 'address'. NULL if the page 513 * table hasn't been allocated. 514 */ 515 spinlock_t *ptl; /* Page table lock. 516 * Protects pte page table if 'pte' 517 * is not NULL, otherwise pmd. 518 */ 519 pgtable_t prealloc_pte; /* Pre-allocated pte page table. 520 * vm_ops->map_pages() sets up a page 521 * table from atomic context. 522 * do_fault_around() pre-allocates 523 * page table to avoid allocation from 524 * atomic context. 
525 */ 526 }; 527 528 /* page entry size for vm->huge_fault() */ 529 enum page_entry_size { 530 PE_SIZE_PTE = 0, 531 PE_SIZE_PMD, 532 PE_SIZE_PUD, 533 }; 534 535 /* 536 * These are the virtual MM functions - opening of an area, closing and 537 * unmapping it (needed to keep files on disk up-to-date etc), pointer 538 * to the functions called when a no-page or a wp-page exception occurs. 539 */ 540 struct vm_operations_struct { 541 void (*open)(struct vm_area_struct * area); 542 /** 543 * @close: Called when the VMA is being removed from the MM. 544 * Context: User context. May sleep. Caller holds mmap_lock. 545 */ 546 void (*close)(struct vm_area_struct * area); 547 /* Called any time before splitting to check if it's allowed */ 548 int (*may_split)(struct vm_area_struct *area, unsigned long addr); 549 int (*mremap)(struct vm_area_struct *area); 550 /* 551 * Called by mprotect() to make driver-specific permission 552 * checks before mprotect() is finalised. The VMA must not 553 * be modified. Returns 0 if mprotect() can proceed. 554 */ 555 int (*mprotect)(struct vm_area_struct *vma, unsigned long start, 556 unsigned long end, unsigned long newflags); 557 vm_fault_t (*fault)(struct vm_fault *vmf); 558 vm_fault_t (*huge_fault)(struct vm_fault *vmf, 559 enum page_entry_size pe_size); 560 vm_fault_t (*map_pages)(struct vm_fault *vmf, 561 pgoff_t start_pgoff, pgoff_t end_pgoff); 562 unsigned long (*pagesize)(struct vm_area_struct * area); 563 564 /* notification that a previously read-only page is about to become 565 * writable, if an error is returned it will cause a SIGBUS */ 566 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); 567 568 /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ 569 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); 570 571 /* called by access_process_vm when get_user_pages() fails, typically 572 * for use by special VMAs. See also generic_access_phys() for a generic 573 * implementation useful for any iomem mapping. 574 */ 575 int (*access)(struct vm_area_struct *vma, unsigned long addr, 576 void *buf, int len, int write); 577 578 /* Called by the /proc/PID/maps code to ask the vma whether it 579 * has a special name. Returning non-NULL will also cause this 580 * vma to be dumped unconditionally. */ 581 const char *(*name)(struct vm_area_struct *vma); 582 583 #ifdef CONFIG_NUMA 584 /* 585 * set_policy() op must add a reference to any non-NULL @new mempolicy 586 * to hold the policy upon return. Caller should pass NULL @new to 587 * remove a policy and fall back to surrounding context--i.e. do not 588 * install a MPOL_DEFAULT policy, nor the task or system default 589 * mempolicy. 590 */ 591 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 592 593 /* 594 * get_policy() op must add reference [mpol_get()] to any policy at 595 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure 596 * in mm/mempolicy.c will do this automatically. 597 * get_policy() must NOT add a ref if the policy at (vma,addr) is not 598 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. 599 * If no [shared/vma] mempolicy exists at the addr, get_policy() op 600 * must return NULL--i.e., do not "fallback" to task or system default 601 * policy. 602 */ 603 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 604 unsigned long addr); 605 #endif 606 /* 607 * Called by vm_normal_page() for special PTEs to find the 608 * page for @addr. This is useful if the default behavior 609 * (using pte_page()) would not find the correct page. 
610 */ 611 struct page *(*find_special_page)(struct vm_area_struct *vma, 612 unsigned long addr); 613 }; 614 615 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) 616 { 617 static const struct vm_operations_struct dummy_vm_ops = {}; 618 619 memset(vma, 0, sizeof(*vma)); 620 vma->vm_mm = mm; 621 vma->vm_ops = &dummy_vm_ops; 622 INIT_LIST_HEAD(&vma->anon_vma_chain); 623 } 624 625 static inline void vma_set_anonymous(struct vm_area_struct *vma) 626 { 627 vma->vm_ops = NULL; 628 } 629 630 static inline bool vma_is_anonymous(struct vm_area_struct *vma) 631 { 632 return !vma->vm_ops; 633 } 634 635 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) 636 { 637 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); 638 639 if (!maybe_stack) 640 return false; 641 642 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == 643 VM_STACK_INCOMPLETE_SETUP) 644 return true; 645 646 return false; 647 } 648 649 static inline bool vma_is_foreign(struct vm_area_struct *vma) 650 { 651 if (!current->mm) 652 return true; 653 654 if (current->mm != vma->vm_mm) 655 return true; 656 657 return false; 658 } 659 660 static inline bool vma_is_accessible(struct vm_area_struct *vma) 661 { 662 return vma->vm_flags & VM_ACCESS_FLAGS; 663 } 664 665 static inline 666 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) 667 { 668 return mas_find(&vmi->mas, max); 669 } 670 671 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) 672 { 673 /* 674 * Uses vma_find() to get the first VMA when the iterator starts. 675 * Calling mas_next() could skip the first entry. 676 */ 677 return vma_find(vmi, ULONG_MAX); 678 } 679 680 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi) 681 { 682 return mas_prev(&vmi->mas, 0); 683 } 684 685 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) 686 { 687 return vmi->mas.index; 688 } 689 690 #define for_each_vma(__vmi, __vma) \ 691 while (((__vma) = vma_next(&(__vmi))) != NULL) 692 693 /* The MM code likes to work with exclusive end addresses */ 694 #define for_each_vma_range(__vmi, __vma, __end) \ 695 while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL) 696 697 #ifdef CONFIG_SHMEM 698 /* 699 * The vma_is_shmem is not inline because it is used only by slow 700 * paths in userfault. 701 */ 702 bool vma_is_shmem(struct vm_area_struct *vma); 703 bool vma_is_anon_shmem(struct vm_area_struct *vma); 704 #else 705 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } 706 static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; } 707 #endif 708 709 int vma_is_stack_for_current(struct vm_area_struct *vma); 710 711 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ 712 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } 713 714 struct mmu_gather; 715 struct inode; 716 717 static inline unsigned int compound_order(struct page *page) 718 { 719 if (!PageHead(page)) 720 return 0; 721 return page[1].compound_order; 722 } 723 724 /** 725 * folio_order - The allocation order of a folio. 726 * @folio: The folio. 727 * 728 * A folio is composed of 2^order pages. See get_order() for the definition 729 * of order. 730 * 731 * Return: The order of the folio. 
732 */ 733 static inline unsigned int folio_order(struct folio *folio) 734 { 735 if (!folio_test_large(folio)) 736 return 0; 737 return folio->_folio_order; 738 } 739 740 #include <linux/huge_mm.h> 741 742 /* 743 * Methods to modify the page usage count. 744 * 745 * What counts for a page usage: 746 * - cache mapping (page->mapping) 747 * - private data (page->private) 748 * - page mapped in a task's page tables, each mapping 749 * is counted separately 750 * 751 * Also, many kernel routines increase the page count before a critical 752 * routine so they can be sure the page doesn't go away from under them. 753 */ 754 755 /* 756 * Drop a ref, return true if the refcount fell to zero (the page has no users) 757 */ 758 static inline int put_page_testzero(struct page *page) 759 { 760 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 761 return page_ref_dec_and_test(page); 762 } 763 764 static inline int folio_put_testzero(struct folio *folio) 765 { 766 return put_page_testzero(&folio->page); 767 } 768 769 /* 770 * Try to grab a ref unless the page has a refcount of zero, return false if 771 * that is the case. 772 * This can be called when MMU is off so it must not access 773 * any of the virtual mappings. 774 */ 775 static inline bool get_page_unless_zero(struct page *page) 776 { 777 return page_ref_add_unless(page, 1, 0); 778 } 779 780 extern int page_is_ram(unsigned long pfn); 781 782 enum { 783 REGION_INTERSECTS, 784 REGION_DISJOINT, 785 REGION_MIXED, 786 }; 787 788 int region_intersects(resource_size_t offset, size_t size, unsigned long flags, 789 unsigned long desc); 790 791 /* Support for virtually mapped pages */ 792 struct page *vmalloc_to_page(const void *addr); 793 unsigned long vmalloc_to_pfn(const void *addr); 794 795 /* 796 * Determine if an address is within the vmalloc range 797 * 798 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there 799 * is no special casing required. 800 */ 801 802 #ifndef is_ioremap_addr 803 #define is_ioremap_addr(x) is_vmalloc_addr(x) 804 #endif 805 806 #ifdef CONFIG_MMU 807 extern bool is_vmalloc_addr(const void *x); 808 extern int is_vmalloc_or_module_addr(const void *x); 809 #else 810 static inline bool is_vmalloc_addr(const void *x) 811 { 812 return false; 813 } 814 static inline int is_vmalloc_or_module_addr(const void *x) 815 { 816 return 0; 817 } 818 #endif 819 820 /* 821 * How many times the entire folio is mapped as a single unit (eg by a 822 * PMD or PUD entry). This is probably not what you want, except for 823 * debugging purposes - it does not include PTE-mapped sub-pages; look 824 * at folio_mapcount() or page_mapcount() or total_mapcount() instead. 825 */ 826 static inline int folio_entire_mapcount(struct folio *folio) 827 { 828 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 829 return atomic_read(folio_mapcount_ptr(folio)) + 1; 830 } 831 832 /* 833 * Mapcount of compound page as a whole, does not include mapped sub-pages. 834 * Must be called only on head of compound page. 835 */ 836 static inline int head_compound_mapcount(struct page *head) 837 { 838 return atomic_read(compound_mapcount_ptr(head)) + 1; 839 } 840 841 /* 842 * If a 16GB hugetlb page were mapped by PTEs of all of its 4kB sub-pages, 843 * its subpages_mapcount would be 0x400000: choose the COMPOUND_MAPPED bit 844 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently 845 * leaves subpages_mapcount at 0, but avoid surprise if it participates later. 
846 */ 847 #define COMPOUND_MAPPED 0x800000 848 #define SUBPAGES_MAPPED (COMPOUND_MAPPED - 1) 849 850 /* 851 * Number of sub-pages mapped by PTE, does not include compound mapcount. 852 * Must be called only on head of compound page. 853 */ 854 static inline int head_subpages_mapcount(struct page *head) 855 { 856 return atomic_read(subpages_mapcount_ptr(head)) & SUBPAGES_MAPPED; 857 } 858 859 /* 860 * The atomic page->_mapcount, starts from -1: so that transitions 861 * both from it and to it can be tracked, using atomic_inc_and_test 862 * and atomic_add_negative(-1). 863 */ 864 static inline void page_mapcount_reset(struct page *page) 865 { 866 atomic_set(&(page)->_mapcount, -1); 867 } 868 869 /* 870 * Mapcount of 0-order page; when compound sub-page, includes 871 * compound_mapcount of compound_head of page. 872 * 873 * Result is undefined for pages which cannot be mapped into userspace. 874 * For example SLAB or special types of pages. See function page_has_type(). 875 * They use this place in struct page differently. 876 */ 877 static inline int page_mapcount(struct page *page) 878 { 879 int mapcount = atomic_read(&page->_mapcount) + 1; 880 881 if (likely(!PageCompound(page))) 882 return mapcount; 883 page = compound_head(page); 884 return head_compound_mapcount(page) + mapcount; 885 } 886 887 int total_compound_mapcount(struct page *head); 888 889 /** 890 * folio_mapcount() - Calculate the number of mappings of this folio. 891 * @folio: The folio. 892 * 893 * A large folio tracks both how many times the entire folio is mapped, 894 * and how many times each individual page in the folio is mapped. 895 * This function calculates the total number of times the folio is 896 * mapped. 897 * 898 * Return: The number of times this folio is mapped. 899 */ 900 static inline int folio_mapcount(struct folio *folio) 901 { 902 if (likely(!folio_test_large(folio))) 903 return atomic_read(&folio->_mapcount) + 1; 904 return total_compound_mapcount(&folio->page); 905 } 906 907 static inline int total_mapcount(struct page *page) 908 { 909 if (likely(!PageCompound(page))) 910 return atomic_read(&page->_mapcount) + 1; 911 return total_compound_mapcount(compound_head(page)); 912 } 913 914 static inline bool folio_large_is_mapped(struct folio *folio) 915 { 916 /* 917 * Reading folio_mapcount_ptr() below could be omitted if hugetlb 918 * participated in incrementing subpages_mapcount when compound mapped. 919 */ 920 return atomic_read(folio_subpages_mapcount_ptr(folio)) > 0 || 921 atomic_read(folio_mapcount_ptr(folio)) >= 0; 922 } 923 924 /** 925 * folio_mapped - Is this folio mapped into userspace? 926 * @folio: The folio. 927 * 928 * Return: True if any page in this folio is referenced by user page tables. 929 */ 930 static inline bool folio_mapped(struct folio *folio) 931 { 932 if (likely(!folio_test_large(folio))) 933 return atomic_read(&folio->_mapcount) >= 0; 934 return folio_large_is_mapped(folio); 935 } 936 937 /* 938 * Return true if this page is mapped into pagetables. 939 * For compound page it returns true if any sub-page of compound page is mapped, 940 * even if this particular sub-page is not itself mapped by any PTE or PMD. 
941 */ 942 static inline bool page_mapped(struct page *page) 943 { 944 if (likely(!PageCompound(page))) 945 return atomic_read(&page->_mapcount) >= 0; 946 return folio_large_is_mapped(page_folio(page)); 947 } 948 949 static inline struct page *virt_to_head_page(const void *x) 950 { 951 struct page *page = virt_to_page(x); 952 953 return compound_head(page); 954 } 955 956 static inline struct folio *virt_to_folio(const void *x) 957 { 958 struct page *page = virt_to_page(x); 959 960 return page_folio(page); 961 } 962 963 void __folio_put(struct folio *folio); 964 965 void put_pages_list(struct list_head *pages); 966 967 void split_page(struct page *page, unsigned int order); 968 void folio_copy(struct folio *dst, struct folio *src); 969 970 unsigned long nr_free_buffer_pages(void); 971 972 /* 973 * Compound pages have a destructor function. Provide a 974 * prototype for that function and accessor functions. 975 * These are _only_ valid on the head of a compound page. 976 */ 977 typedef void compound_page_dtor(struct page *); 978 979 /* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ 980 enum compound_dtor_id { 981 NULL_COMPOUND_DTOR, 982 COMPOUND_PAGE_DTOR, 983 #ifdef CONFIG_HUGETLB_PAGE 984 HUGETLB_PAGE_DTOR, 985 #endif 986 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 987 TRANSHUGE_PAGE_DTOR, 988 #endif 989 NR_COMPOUND_DTORS, 990 }; 991 extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; 992 993 static inline void set_compound_page_dtor(struct page *page, 994 enum compound_dtor_id compound_dtor) 995 { 996 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); 997 page[1].compound_dtor = compound_dtor; 998 } 999 1000 static inline void folio_set_compound_dtor(struct folio *folio, 1001 enum compound_dtor_id compound_dtor) 1002 { 1003 VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio); 1004 folio->_folio_dtor = compound_dtor; 1005 } 1006 1007 void destroy_large_folio(struct folio *folio); 1008 1009 static inline int head_compound_pincount(struct page *head) 1010 { 1011 return atomic_read(compound_pincount_ptr(head)); 1012 } 1013 1014 static inline void set_compound_order(struct page *page, unsigned int order) 1015 { 1016 page[1].compound_order = order; 1017 #ifdef CONFIG_64BIT 1018 page[1].compound_nr = 1U << order; 1019 #endif 1020 } 1021 1022 /* Returns the number of pages in this potentially compound page. */ 1023 static inline unsigned long compound_nr(struct page *page) 1024 { 1025 if (!PageHead(page)) 1026 return 1; 1027 #ifdef CONFIG_64BIT 1028 return page[1].compound_nr; 1029 #else 1030 return 1UL << compound_order(page); 1031 #endif 1032 } 1033 1034 /* Returns the number of bytes in this potentially compound page. */ 1035 static inline unsigned long page_size(struct page *page) 1036 { 1037 return PAGE_SIZE << compound_order(page); 1038 } 1039 1040 /* Returns the number of bits needed for the number of bytes in a page */ 1041 static inline unsigned int page_shift(struct page *page) 1042 { 1043 return PAGE_SHIFT + compound_order(page); 1044 } 1045 1046 /** 1047 * thp_order - Order of a transparent huge page. 1048 * @page: Head page of a transparent huge page. 1049 */ 1050 static inline unsigned int thp_order(struct page *page) 1051 { 1052 VM_BUG_ON_PGFLAGS(PageTail(page), page); 1053 return compound_order(page); 1054 } 1055 1056 /** 1057 * thp_nr_pages - The number of regular pages in this huge page. 1058 * @page: The head page of a huge page. 
1059 */ 1060 static inline int thp_nr_pages(struct page *page) 1061 { 1062 VM_BUG_ON_PGFLAGS(PageTail(page), page); 1063 return compound_nr(page); 1064 } 1065 1066 /** 1067 * thp_size - Size of a transparent huge page. 1068 * @page: Head page of a transparent huge page. 1069 * 1070 * Return: Number of bytes in this page. 1071 */ 1072 static inline unsigned long thp_size(struct page *page) 1073 { 1074 return PAGE_SIZE << thp_order(page); 1075 } 1076 1077 void free_compound_page(struct page *page); 1078 1079 #ifdef CONFIG_MMU 1080 /* 1081 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when 1082 * servicing faults for write access. In the normal case, do always want 1083 * pte_mkwrite. But get_user_pages can cause write faults for mappings 1084 * that do not have writing enabled, when used by access_process_vm. 1085 */ 1086 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 1087 { 1088 if (likely(vma->vm_flags & VM_WRITE)) 1089 pte = pte_mkwrite(pte); 1090 return pte; 1091 } 1092 1093 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); 1094 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr); 1095 1096 vm_fault_t finish_fault(struct vm_fault *vmf); 1097 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); 1098 #endif 1099 1100 /* 1101 * Multiple processes may "see" the same page. E.g. for untouched 1102 * mappings of /dev/null, all processes see the same page full of 1103 * zeroes, and text pages of executables and shared libraries have 1104 * only one copy in memory, at most, normally. 1105 * 1106 * For the non-reserved pages, page_count(page) denotes a reference count. 1107 * page_count() == 0 means the page is free. page->lru is then used for 1108 * freelist management in the buddy allocator. 1109 * page_count() > 0 means the page has been allocated. 1110 * 1111 * Pages are allocated by the slab allocator in order to provide memory 1112 * to kmalloc and kmem_cache_alloc. In this case, the management of the 1113 * page, and the fields in 'struct page' are the responsibility of mm/slab.c 1114 * unless a particular usage is carefully commented. (the responsibility of 1115 * freeing the kmalloc memory is the caller's, of course). 1116 * 1117 * A page may be used by anyone else who does a __get_free_page(). 1118 * In this case, page_count still tracks the references, and should only 1119 * be used through the normal accessor functions. The top bits of page->flags 1120 * and page->virtual store page management information, but all other fields 1121 * are unused and could be used privately, carefully. The management of this 1122 * page is the responsibility of the one who allocated it, and those who have 1123 * subsequently been given references to it. 1124 * 1125 * The other pages (we may call them "pagecache pages") are completely 1126 * managed by the Linux memory manager: I/O, buffers, swapping etc. 1127 * The following discussion applies only to them. 1128 * 1129 * A pagecache page contains an opaque `private' member, which belongs to the 1130 * page's address_space. Usually, this is the address of a circular list of 1131 * the page's disk buffers. PG_private must be set to tell the VM to call 1132 * into the filesystem to release these pages. 1133 * 1134 * A page may belong to an inode's memory mapping. In this case, page->mapping 1135 * is the pointer to the inode, and page->index is the file offset of the page, 1136 * in units of PAGE_SIZE. 
1137 * 1138 * If pagecache pages are not associated with an inode, they are said to be 1139 * anonymous pages. These may become associated with the swapcache, and in that 1140 * case PG_swapcache is set, and page->private is an offset into the swapcache. 1141 * 1142 * In either case (swapcache or inode backed), the pagecache itself holds one 1143 * reference to the page. Setting PG_private should also increment the 1144 * refcount. The each user mapping also has a reference to the page. 1145 * 1146 * The pagecache pages are stored in a per-mapping radix tree, which is 1147 * rooted at mapping->i_pages, and indexed by offset. 1148 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space 1149 * lists, we instead now tag pages as dirty/writeback in the radix tree. 1150 * 1151 * All pagecache pages may be subject to I/O: 1152 * - inode pages may need to be read from disk, 1153 * - inode pages which have been modified and are MAP_SHARED may need 1154 * to be written back to the inode on disk, 1155 * - anonymous pages (including MAP_PRIVATE file mappings) which have been 1156 * modified may need to be swapped out to swap space and (later) to be read 1157 * back into memory. 1158 */ 1159 1160 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) 1161 DECLARE_STATIC_KEY_FALSE(devmap_managed_key); 1162 1163 bool __put_devmap_managed_page_refs(struct page *page, int refs); 1164 static inline bool put_devmap_managed_page_refs(struct page *page, int refs) 1165 { 1166 if (!static_branch_unlikely(&devmap_managed_key)) 1167 return false; 1168 if (!is_zone_device_page(page)) 1169 return false; 1170 return __put_devmap_managed_page_refs(page, refs); 1171 } 1172 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ 1173 static inline bool put_devmap_managed_page_refs(struct page *page, int refs) 1174 { 1175 return false; 1176 } 1177 #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ 1178 1179 static inline bool put_devmap_managed_page(struct page *page) 1180 { 1181 return put_devmap_managed_page_refs(page, 1); 1182 } 1183 1184 /* 127: arbitrary random number, small enough to assemble well */ 1185 #define folio_ref_zero_or_close_to_overflow(folio) \ 1186 ((unsigned int) folio_ref_count(folio) + 127u <= 127u) 1187 1188 /** 1189 * folio_get - Increment the reference count on a folio. 1190 * @folio: The folio. 1191 * 1192 * Context: May be called in any context, as long as you know that 1193 * you have a refcount on the folio. If you do not already have one, 1194 * folio_try_get() may be the right interface for you to use. 1195 */ 1196 static inline void folio_get(struct folio *folio) 1197 { 1198 VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio); 1199 folio_ref_inc(folio); 1200 } 1201 1202 static inline void get_page(struct page *page) 1203 { 1204 folio_get(page_folio(page)); 1205 } 1206 1207 int __must_check try_grab_page(struct page *page, unsigned int flags); 1208 1209 static inline __must_check bool try_get_page(struct page *page) 1210 { 1211 page = compound_head(page); 1212 if (WARN_ON_ONCE(page_ref_count(page) <= 0)) 1213 return false; 1214 page_ref_inc(page); 1215 return true; 1216 } 1217 1218 /** 1219 * folio_put - Decrement the reference count on a folio. 1220 * @folio: The folio. 1221 * 1222 * If the folio's reference count reaches zero, the memory will be 1223 * released back to the page allocator and may be used by another 1224 * allocation immediately. 
Do not access the memory or the struct folio 1225 * after calling folio_put() unless you can be sure that it wasn't the 1226 * last reference. 1227 * 1228 * Context: May be called in process or interrupt context, but not in NMI 1229 * context. May be called while holding a spinlock. 1230 */ 1231 static inline void folio_put(struct folio *folio) 1232 { 1233 if (folio_put_testzero(folio)) 1234 __folio_put(folio); 1235 } 1236 1237 /** 1238 * folio_put_refs - Reduce the reference count on a folio. 1239 * @folio: The folio. 1240 * @refs: The amount to subtract from the folio's reference count. 1241 * 1242 * If the folio's reference count reaches zero, the memory will be 1243 * released back to the page allocator and may be used by another 1244 * allocation immediately. Do not access the memory or the struct folio 1245 * after calling folio_put_refs() unless you can be sure that these weren't 1246 * the last references. 1247 * 1248 * Context: May be called in process or interrupt context, but not in NMI 1249 * context. May be called while holding a spinlock. 1250 */ 1251 static inline void folio_put_refs(struct folio *folio, int refs) 1252 { 1253 if (folio_ref_sub_and_test(folio, refs)) 1254 __folio_put(folio); 1255 } 1256 1257 /* 1258 * union release_pages_arg - an array of pages or folios 1259 * 1260 * release_pages() releases a simple array of multiple pages, and 1261 * accepts various different forms of said page array: either 1262 * a regular old boring array of pages, an array of folios, or 1263 * an array of encoded page pointers. 1264 * 1265 * The transparent union syntax for this kind of "any of these 1266 * argument types" is all kinds of ugly, so look away. 1267 */ 1268 typedef union { 1269 struct page **pages; 1270 struct folio **folios; 1271 struct encoded_page **encoded_pages; 1272 } release_pages_arg __attribute__ ((__transparent_union__)); 1273 1274 void release_pages(release_pages_arg, int nr); 1275 1276 /** 1277 * folios_put - Decrement the reference count on an array of folios. 1278 * @folios: The folios. 1279 * @nr: How many folios there are. 1280 * 1281 * Like folio_put(), but for an array of folios. This is more efficient 1282 * than writing the loop yourself as it will optimise the locks which 1283 * need to be taken if the folios are freed. 1284 * 1285 * Context: May be called in process or interrupt context, but not in NMI 1286 * context. May be called while holding a spinlock. 1287 */ 1288 static inline void folios_put(struct folio **folios, unsigned int nr) 1289 { 1290 release_pages(folios, nr); 1291 } 1292 1293 static inline void put_page(struct page *page) 1294 { 1295 struct folio *folio = page_folio(page); 1296 1297 /* 1298 * For some devmap managed pages we need to catch refcount transition 1299 * from 2 to 1: 1300 */ 1301 if (put_devmap_managed_page(&folio->page)) 1302 return; 1303 folio_put(folio); 1304 } 1305 1306 /* 1307 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload 1308 * the page's refcount so that two separate items are tracked: the original page 1309 * reference count, and also a new count of how many pin_user_pages() calls were 1310 * made against the page. ("gup-pinned" is another term for the latter). 1311 * 1312 * With this scheme, pin_user_pages() becomes special: such pages are marked as 1313 * distinct from normal pages. As such, the unpin_user_page() call (and its 1314 * variants) must be used in order to release gup-pinned pages. 
1315 * 1316 * Choice of value: 1317 * 1318 * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference 1319 * counts with respect to pin_user_pages() and unpin_user_page() becomes 1320 * simpler, due to the fact that adding an even power of two to the page 1321 * refcount has the effect of using only the upper N bits, for the code that 1322 * counts up using the bias value. This means that the lower bits are left for 1323 * the exclusive use of the original code that increments and decrements by one 1324 * (or at least, by much smaller values than the bias value). 1325 * 1326 * Of course, once the lower bits overflow into the upper bits (and this is 1327 * OK, because subtraction recovers the original values), then visual inspection 1328 * no longer suffices to directly view the separate counts. However, for normal 1329 * applications that don't have huge page reference counts, this won't be an 1330 * issue. 1331 * 1332 * Locking: the lockless algorithm described in folio_try_get_rcu() 1333 * provides safe operation for get_user_pages(), page_mkclean() and 1334 * other calls that race to set up page table entries. 1335 */ 1336 #define GUP_PIN_COUNTING_BIAS (1U << 10) 1337 1338 void unpin_user_page(struct page *page); 1339 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, 1340 bool make_dirty); 1341 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, 1342 bool make_dirty); 1343 void unpin_user_pages(struct page **pages, unsigned long npages); 1344 1345 static inline bool is_cow_mapping(vm_flags_t flags) 1346 { 1347 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 1348 } 1349 1350 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 1351 #define SECTION_IN_PAGE_FLAGS 1352 #endif 1353 1354 /* 1355 * The identification function is mainly used by the buddy allocator for 1356 * determining if two pages could be buddies. We are not really identifying 1357 * the zone since we could be using the section number id if we do not have 1358 * node id available in page flags. 1359 * We only guarantee that it will return the same value for two combinable 1360 * pages in a zone. 
1361 */ 1362 static inline int page_zone_id(struct page *page) 1363 { 1364 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; 1365 } 1366 1367 #ifdef NODE_NOT_IN_PAGE_FLAGS 1368 extern int page_to_nid(const struct page *page); 1369 #else 1370 static inline int page_to_nid(const struct page *page) 1371 { 1372 struct page *p = (struct page *)page; 1373 1374 return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; 1375 } 1376 #endif 1377 1378 static inline int folio_nid(const struct folio *folio) 1379 { 1380 return page_to_nid(&folio->page); 1381 } 1382 1383 #ifdef CONFIG_NUMA_BALANCING 1384 /* page access time bits needs to hold at least 4 seconds */ 1385 #define PAGE_ACCESS_TIME_MIN_BITS 12 1386 #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS 1387 #define PAGE_ACCESS_TIME_BUCKETS \ 1388 (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT) 1389 #else 1390 #define PAGE_ACCESS_TIME_BUCKETS 0 1391 #endif 1392 1393 #define PAGE_ACCESS_TIME_MASK \ 1394 (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS) 1395 1396 static inline int cpu_pid_to_cpupid(int cpu, int pid) 1397 { 1398 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK); 1399 } 1400 1401 static inline int cpupid_to_pid(int cpupid) 1402 { 1403 return cpupid & LAST__PID_MASK; 1404 } 1405 1406 static inline int cpupid_to_cpu(int cpupid) 1407 { 1408 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; 1409 } 1410 1411 static inline int cpupid_to_nid(int cpupid) 1412 { 1413 return cpu_to_node(cpupid_to_cpu(cpupid)); 1414 } 1415 1416 static inline bool cpupid_pid_unset(int cpupid) 1417 { 1418 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK); 1419 } 1420 1421 static inline bool cpupid_cpu_unset(int cpupid) 1422 { 1423 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK); 1424 } 1425 1426 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) 1427 { 1428 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid); 1429 } 1430 1431 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) 1432 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 1433 static inline int page_cpupid_xchg_last(struct page *page, int cpupid) 1434 { 1435 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); 1436 } 1437 1438 static inline int page_cpupid_last(struct page *page) 1439 { 1440 return page->_last_cpupid; 1441 } 1442 static inline void page_cpupid_reset_last(struct page *page) 1443 { 1444 page->_last_cpupid = -1 & LAST_CPUPID_MASK; 1445 } 1446 #else 1447 static inline int page_cpupid_last(struct page *page) 1448 { 1449 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; 1450 } 1451 1452 extern int page_cpupid_xchg_last(struct page *page, int cpupid); 1453 1454 static inline void page_cpupid_reset_last(struct page *page) 1455 { 1456 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; 1457 } 1458 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ 1459 1460 static inline int xchg_page_access_time(struct page *page, int time) 1461 { 1462 int last_time; 1463 1464 last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS); 1465 return last_time << PAGE_ACCESS_TIME_BUCKETS; 1466 } 1467 #else /* !CONFIG_NUMA_BALANCING */ 1468 static inline int page_cpupid_xchg_last(struct page *page, int cpupid) 1469 { 1470 return page_to_nid(page); /* XXX */ 1471 } 1472 1473 static inline int xchg_page_access_time(struct page *page, int time) 1474 { 1475 return 0; 1476 } 1477 1478 static inline int page_cpupid_last(struct page *page) 1479 { 1480 return page_to_nid(page); /* XXX */ 1481 } 1482 1483 static 
inline int cpupid_to_nid(int cpupid) 1484 { 1485 return -1; 1486 } 1487 1488 static inline int cpupid_to_pid(int cpupid) 1489 { 1490 return -1; 1491 } 1492 1493 static inline int cpupid_to_cpu(int cpupid) 1494 { 1495 return -1; 1496 } 1497 1498 static inline int cpu_pid_to_cpupid(int nid, int pid) 1499 { 1500 return -1; 1501 } 1502 1503 static inline bool cpupid_pid_unset(int cpupid) 1504 { 1505 return true; 1506 } 1507 1508 static inline void page_cpupid_reset_last(struct page *page) 1509 { 1510 } 1511 1512 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) 1513 { 1514 return false; 1515 } 1516 #endif /* CONFIG_NUMA_BALANCING */ 1517 1518 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) 1519 1520 /* 1521 * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid 1522 * setting tags for all pages to native kernel tag value 0xff, as the default 1523 * value 0x00 maps to 0xff. 1524 */ 1525 1526 static inline u8 page_kasan_tag(const struct page *page) 1527 { 1528 u8 tag = 0xff; 1529 1530 if (kasan_enabled()) { 1531 tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; 1532 tag ^= 0xff; 1533 } 1534 1535 return tag; 1536 } 1537 1538 static inline void page_kasan_tag_set(struct page *page, u8 tag) 1539 { 1540 unsigned long old_flags, flags; 1541 1542 if (!kasan_enabled()) 1543 return; 1544 1545 tag ^= 0xff; 1546 old_flags = READ_ONCE(page->flags); 1547 do { 1548 flags = old_flags; 1549 flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); 1550 flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; 1551 } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); 1552 } 1553 1554 static inline void page_kasan_tag_reset(struct page *page) 1555 { 1556 if (kasan_enabled()) 1557 page_kasan_tag_set(page, 0xff); 1558 } 1559 1560 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ 1561 1562 static inline u8 page_kasan_tag(const struct page *page) 1563 { 1564 return 0xff; 1565 } 1566 1567 static inline void page_kasan_tag_set(struct page *page, u8 tag) { } 1568 static inline void page_kasan_tag_reset(struct page *page) { } 1569 1570 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ 1571 1572 static inline struct zone *page_zone(const struct page *page) 1573 { 1574 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; 1575 } 1576 1577 static inline pg_data_t *page_pgdat(const struct page *page) 1578 { 1579 return NODE_DATA(page_to_nid(page)); 1580 } 1581 1582 static inline struct zone *folio_zone(const struct folio *folio) 1583 { 1584 return page_zone(&folio->page); 1585 } 1586 1587 static inline pg_data_t *folio_pgdat(const struct folio *folio) 1588 { 1589 return page_pgdat(&folio->page); 1590 } 1591 1592 #ifdef SECTION_IN_PAGE_FLAGS 1593 static inline void set_page_section(struct page *page, unsigned long section) 1594 { 1595 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); 1596 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; 1597 } 1598 1599 static inline unsigned long page_to_section(const struct page *page) 1600 { 1601 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 1602 } 1603 #endif 1604 1605 /** 1606 * folio_pfn - Return the Page Frame Number of a folio. 1607 * @folio: The folio. 1608 * 1609 * A folio may contain multiple pages. The pages have consecutive 1610 * Page Frame Numbers. 1611 * 1612 * Return: The Page Frame Number of the first page in the folio. 
1613 */ 1614 static inline unsigned long folio_pfn(struct folio *folio) 1615 { 1616 return page_to_pfn(&folio->page); 1617 } 1618 1619 static inline struct folio *pfn_folio(unsigned long pfn) 1620 { 1621 return page_folio(pfn_to_page(pfn)); 1622 } 1623 1624 static inline atomic_t *folio_pincount_ptr(struct folio *folio) 1625 { 1626 return &folio_page(folio, 1)->compound_pincount; 1627 } 1628 1629 /** 1630 * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA. 1631 * @folio: The folio. 1632 * 1633 * This function checks if a folio has been pinned via a call to 1634 * a function in the pin_user_pages() family. 1635 * 1636 * For small folios, the return value is partially fuzzy: false is not fuzzy, 1637 * because it means "definitely not pinned for DMA", but true means "probably 1638 * pinned for DMA, but possibly a false positive due to having at least 1639 * GUP_PIN_COUNTING_BIAS worth of normal folio references". 1640 * 1641 * False positives are OK, because: a) it's unlikely for a folio to 1642 * get that many refcounts, and b) all the callers of this routine are 1643 * expected to be able to deal gracefully with a false positive. 1644 * 1645 * For large folios, the result will be exactly correct. That's because 1646 * we have more tracking data available: the compound_pincount is used 1647 * instead of the GUP_PIN_COUNTING_BIAS scheme. 1648 * 1649 * For more information, please see Documentation/core-api/pin_user_pages.rst. 1650 * 1651 * Return: True, if it is likely that the page has been "dma-pinned". 1652 * False, if the page is definitely not dma-pinned. 1653 */ 1654 static inline bool folio_maybe_dma_pinned(struct folio *folio) 1655 { 1656 if (folio_test_large(folio)) 1657 return atomic_read(folio_pincount_ptr(folio)) > 0; 1658 1659 /* 1660 * folio_ref_count() is signed. If that refcount overflows, then 1661 * folio_ref_count() returns a negative value, and callers will avoid 1662 * further incrementing the refcount. 1663 * 1664 * Here, for that overflow case, use the sign bit to count a little 1665 * bit higher via unsigned math, and thus still get an accurate result. 1666 */ 1667 return ((unsigned int)folio_ref_count(folio)) >= 1668 GUP_PIN_COUNTING_BIAS; 1669 } 1670 1671 static inline bool page_maybe_dma_pinned(struct page *page) 1672 { 1673 return folio_maybe_dma_pinned(page_folio(page)); 1674 } 1675 1676 /* 1677 * This should most likely only be called during fork() to see whether we 1678 * should break the cow immediately for an anon page on the src mm. 1679 * 1680 * The caller has to hold the PT lock and the vma->vm_mm->->write_protect_seq. 1681 */ 1682 static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, 1683 struct page *page) 1684 { 1685 VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1)); 1686 1687 if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) 1688 return false; 1689 1690 return page_maybe_dma_pinned(page); 1691 } 1692 1693 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ 1694 #ifdef CONFIG_MIGRATION 1695 static inline bool is_longterm_pinnable_page(struct page *page) 1696 { 1697 #ifdef CONFIG_CMA 1698 int mt = get_pageblock_migratetype(page); 1699 1700 if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) 1701 return false; 1702 #endif 1703 /* The zero page may always be pinned */ 1704 if (is_zero_pfn(page_to_pfn(page))) 1705 return true; 1706 1707 /* Coherent device memory must always allow eviction. 
*/ 1708 if (is_device_coherent_page(page)) 1709 return false; 1710 1711 /* Otherwise, non-movable zone pages can be pinned. */ 1712 return !is_zone_movable_page(page); 1713 } 1714 #else 1715 static inline bool is_longterm_pinnable_page(struct page *page) 1716 { 1717 return true; 1718 } 1719 #endif 1720 1721 static inline bool folio_is_longterm_pinnable(struct folio *folio) 1722 { 1723 return is_longterm_pinnable_page(&folio->page); 1724 } 1725 1726 static inline void set_page_zone(struct page *page, enum zone_type zone) 1727 { 1728 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); 1729 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; 1730 } 1731 1732 static inline void set_page_node(struct page *page, unsigned long node) 1733 { 1734 page->flags &= ~(NODES_MASK << NODES_PGSHIFT); 1735 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 1736 } 1737 1738 static inline void set_page_links(struct page *page, enum zone_type zone, 1739 unsigned long node, unsigned long pfn) 1740 { 1741 set_page_zone(page, zone); 1742 set_page_node(page, node); 1743 #ifdef SECTION_IN_PAGE_FLAGS 1744 set_page_section(page, pfn_to_section_nr(pfn)); 1745 #endif 1746 } 1747 1748 /** 1749 * folio_nr_pages - The number of pages in the folio. 1750 * @folio: The folio. 1751 * 1752 * Return: A positive power of two. 1753 */ 1754 static inline long folio_nr_pages(struct folio *folio) 1755 { 1756 if (!folio_test_large(folio)) 1757 return 1; 1758 #ifdef CONFIG_64BIT 1759 return folio->_folio_nr_pages; 1760 #else 1761 return 1L << folio->_folio_order; 1762 #endif 1763 } 1764 1765 /** 1766 * folio_next - Move to the next physical folio. 1767 * @folio: The folio we're currently operating on. 1768 * 1769 * If you have physically contiguous memory which may span more than 1770 * one folio (eg a &struct bio_vec), use this function to move from one 1771 * folio to the next. Do not use it if the memory is only virtually 1772 * contiguous as the folios are almost certainly not adjacent to each 1773 * other. This is the folio equivalent to writing ``page++``. 1774 * 1775 * Context: We assume that the folios are refcounted and/or locked at a 1776 * higher level and do not adjust the reference counts. 1777 * Return: The next struct folio. 1778 */ 1779 static inline struct folio *folio_next(struct folio *folio) 1780 { 1781 return (struct folio *)folio_page(folio, folio_nr_pages(folio)); 1782 } 1783 1784 /** 1785 * folio_shift - The size of the memory described by this folio. 1786 * @folio: The folio. 1787 * 1788 * A folio represents a number of bytes which is a power-of-two in size. 1789 * This function tells you which power-of-two the folio is. See also 1790 * folio_size() and folio_order(). 1791 * 1792 * Context: The caller should have a reference on the folio to prevent 1793 * it from being split. It is not necessary for the folio to be locked. 1794 * Return: The base-2 logarithm of the size of this folio. 1795 */ 1796 static inline unsigned int folio_shift(struct folio *folio) 1797 { 1798 return PAGE_SHIFT + folio_order(folio); 1799 } 1800 1801 /** 1802 * folio_size - The number of bytes in a folio. 1803 * @folio: The folio. 1804 * 1805 * Context: The caller should have a reference on the folio to prevent 1806 * it from being split. It is not necessary for the folio to be locked. 1807 * Return: The number of bytes in this folio. 
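 *
 * As an illustrative sketch (assuming a folio the caller holds a reference
 * on), the size helpers above relate to each other as follows:
 *
 *	size_t bytes = folio_size(folio);
 *	long npages  = folio_nr_pages(folio);
 *
 * with bytes == npages * PAGE_SIZE == 1UL << folio_shift(folio).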
1808 */ 1809 static inline size_t folio_size(struct folio *folio) 1810 { 1811 return PAGE_SIZE << folio_order(folio); 1812 } 1813 1814 #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE 1815 static inline int arch_make_page_accessible(struct page *page) 1816 { 1817 return 0; 1818 } 1819 #endif 1820 1821 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE 1822 static inline int arch_make_folio_accessible(struct folio *folio) 1823 { 1824 int ret; 1825 long i, nr = folio_nr_pages(folio); 1826 1827 for (i = 0; i < nr; i++) { 1828 ret = arch_make_page_accessible(folio_page(folio, i)); 1829 if (ret) 1830 break; 1831 } 1832 1833 return ret; 1834 } 1835 #endif 1836 1837 /* 1838 * Some inline functions in vmstat.h depend on page_zone() 1839 */ 1840 #include <linux/vmstat.h> 1841 1842 static __always_inline void *lowmem_page_address(const struct page *page) 1843 { 1844 return page_to_virt(page); 1845 } 1846 1847 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) 1848 #define HASHED_PAGE_VIRTUAL 1849 #endif 1850 1851 #if defined(WANT_PAGE_VIRTUAL) 1852 static inline void *page_address(const struct page *page) 1853 { 1854 return page->virtual; 1855 } 1856 static inline void set_page_address(struct page *page, void *address) 1857 { 1858 page->virtual = address; 1859 } 1860 #define page_address_init() do { } while(0) 1861 #endif 1862 1863 #if defined(HASHED_PAGE_VIRTUAL) 1864 void *page_address(const struct page *page); 1865 void set_page_address(struct page *page, void *virtual); 1866 void page_address_init(void); 1867 #endif 1868 1869 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) 1870 #define page_address(page) lowmem_page_address(page) 1871 #define set_page_address(page, address) do { } while(0) 1872 #define page_address_init() do { } while(0) 1873 #endif 1874 1875 static inline void *folio_address(const struct folio *folio) 1876 { 1877 return page_address(&folio->page); 1878 } 1879 1880 extern void *page_rmapping(struct page *page); 1881 extern pgoff_t __page_file_index(struct page *page); 1882 1883 /* 1884 * Return the pagecache index of the passed page. Regular pagecache pages 1885 * use ->index whereas swapcache pages use swp_offset(->private) 1886 */ 1887 static inline pgoff_t page_index(struct page *page) 1888 { 1889 if (unlikely(PageSwapCache(page))) 1890 return __page_file_index(page); 1891 return page->index; 1892 } 1893 1894 /* 1895 * Return true only if the page has been allocated with 1896 * ALLOC_NO_WATERMARKS and the low watermark was not 1897 * met implying that the system is under some pressure. 1898 */ 1899 static inline bool page_is_pfmemalloc(const struct page *page) 1900 { 1901 /* 1902 * lru.next has bit 1 set if the page is allocated from the 1903 * pfmemalloc reserves. Callers may simply overwrite it if 1904 * they do not need to preserve that information. 1905 */ 1906 return (uintptr_t)page->lru.next & BIT(1); 1907 } 1908 1909 /* 1910 * Only to be called by the page allocator on a freshly allocated 1911 * page. 1912 */ 1913 static inline void set_page_pfmemalloc(struct page *page) 1914 { 1915 page->lru.next = (void *)BIT(1); 1916 } 1917 1918 static inline void clear_page_pfmemalloc(struct page *page) 1919 { 1920 page->lru.next = NULL; 1921 } 1922 1923 /* 1924 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. 
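 *
 * An illustrative, arch-independent sketch of a typical call site (not a
 * quote of any particular architecture's fault handler):
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_OOM)
 *		pagefault_out_of_memory();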
1925 */ 1926 extern void pagefault_out_of_memory(void); 1927 1928 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 1929 #define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) 1930 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1)) 1931 1932 /* 1933 * Flags passed to show_mem() and show_free_areas() to suppress output in 1934 * various contexts. 1935 */ 1936 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ 1937 1938 extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); 1939 static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask) 1940 { 1941 __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1); 1942 } 1943 1944 /* 1945 * Parameter block passed down to zap_pte_range in exceptional cases. 1946 */ 1947 struct zap_details { 1948 struct folio *single_folio; /* Locked folio to be unmapped */ 1949 bool even_cows; /* Zap COWed private pages too? */ 1950 zap_flags_t zap_flags; /* Extra flags for zapping */ 1951 }; 1952 1953 /* 1954 * Whether to drop the pte markers, for example, the uffd-wp information for 1955 * file-backed memory. This should only be specified when we will completely 1956 * drop the page in the mm, either by truncation or unmapping of the vma. By 1957 * default, the flag is not set. 1958 */ 1959 #define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0)) 1960 /* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */ 1961 #define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1)) 1962 1963 #ifdef CONFIG_MMU 1964 extern bool can_do_mlock(void); 1965 #else 1966 static inline bool can_do_mlock(void) { return false; } 1967 #endif 1968 extern int user_shm_lock(size_t, struct ucounts *); 1969 extern void user_shm_unlock(size_t, struct ucounts *); 1970 1971 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, 1972 pte_t pte); 1973 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1974 pte_t pte); 1975 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, 1976 pmd_t pmd); 1977 1978 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1979 unsigned long size); 1980 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, 1981 unsigned long size, struct zap_details *details); 1982 static inline void zap_vma_pages(struct vm_area_struct *vma) 1983 { 1984 zap_page_range_single(vma, vma->vm_start, 1985 vma->vm_end - vma->vm_start, NULL); 1986 } 1987 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt, 1988 struct vm_area_struct *start_vma, unsigned long start, 1989 unsigned long end); 1990 1991 struct mmu_notifier_range; 1992 1993 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, 1994 unsigned long end, unsigned long floor, unsigned long ceiling); 1995 int 1996 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); 1997 int follow_pte(struct mm_struct *mm, unsigned long address, 1998 pte_t **ptepp, spinlock_t **ptlp); 1999 int follow_pfn(struct vm_area_struct *vma, unsigned long address, 2000 unsigned long *pfn); 2001 int follow_phys(struct vm_area_struct *vma, unsigned long address, 2002 unsigned int flags, unsigned long *prot, resource_size_t *phys); 2003 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 2004 void *buf, int len, int write); 2005 2006 extern void truncate_pagecache(struct inode *inode, loff_t new); 2007 extern void truncate_setsize(struct 
inode *inode, loff_t newsize); 2008 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); 2009 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 2010 int generic_error_remove_page(struct address_space *mapping, struct page *page); 2011 2012 #ifdef CONFIG_MMU 2013 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, 2014 unsigned long address, unsigned int flags, 2015 struct pt_regs *regs); 2016 extern int fixup_user_fault(struct mm_struct *mm, 2017 unsigned long address, unsigned int fault_flags, 2018 bool *unlocked); 2019 void unmap_mapping_pages(struct address_space *mapping, 2020 pgoff_t start, pgoff_t nr, bool even_cows); 2021 void unmap_mapping_range(struct address_space *mapping, 2022 loff_t const holebegin, loff_t const holelen, int even_cows); 2023 #else 2024 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, 2025 unsigned long address, unsigned int flags, 2026 struct pt_regs *regs) 2027 { 2028 /* should never happen if there's no MMU */ 2029 BUG(); 2030 return VM_FAULT_SIGBUS; 2031 } 2032 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, 2033 unsigned int fault_flags, bool *unlocked) 2034 { 2035 /* should never happen if there's no MMU */ 2036 BUG(); 2037 return -EFAULT; 2038 } 2039 static inline void unmap_mapping_pages(struct address_space *mapping, 2040 pgoff_t start, pgoff_t nr, bool even_cows) { } 2041 static inline void unmap_mapping_range(struct address_space *mapping, 2042 loff_t const holebegin, loff_t const holelen, int even_cows) { } 2043 #endif 2044 2045 static inline void unmap_shared_mapping_range(struct address_space *mapping, 2046 loff_t const holebegin, loff_t const holelen) 2047 { 2048 unmap_mapping_range(mapping, holebegin, holelen, 0); 2049 } 2050 2051 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, 2052 void *buf, int len, unsigned int gup_flags); 2053 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, 2054 void *buf, int len, unsigned int gup_flags); 2055 extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 2056 void *buf, int len, unsigned int gup_flags); 2057 2058 long get_user_pages_remote(struct mm_struct *mm, 2059 unsigned long start, unsigned long nr_pages, 2060 unsigned int gup_flags, struct page **pages, 2061 struct vm_area_struct **vmas, int *locked); 2062 long pin_user_pages_remote(struct mm_struct *mm, 2063 unsigned long start, unsigned long nr_pages, 2064 unsigned int gup_flags, struct page **pages, 2065 struct vm_area_struct **vmas, int *locked); 2066 long get_user_pages(unsigned long start, unsigned long nr_pages, 2067 unsigned int gup_flags, struct page **pages, 2068 struct vm_area_struct **vmas); 2069 long pin_user_pages(unsigned long start, unsigned long nr_pages, 2070 unsigned int gup_flags, struct page **pages, 2071 struct vm_area_struct **vmas); 2072 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2073 struct page **pages, unsigned int gup_flags); 2074 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 2075 struct page **pages, unsigned int gup_flags); 2076 2077 int get_user_pages_fast(unsigned long start, int nr_pages, 2078 unsigned int gup_flags, struct page **pages); 2079 int pin_user_pages_fast(unsigned long start, int nr_pages, 2080 unsigned int gup_flags, struct page **pages); 2081 2082 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); 2083 int __account_locked_vm(struct mm_struct *mm, 
			unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
struct page *get_dump_page(unsigned long addr);

bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);

/*
 * Flags used by change_protection(). For now we make it a bitmap so
 * that we can pass in multiple flags just like parameters. However, for now,
 * all the callers only use one of the flags at a time.
 */
/*
 * Whether we should manually check if we can map individual PTEs writable,
 * because something (e.g., COW, uffd-wp) blocks that from happening for all
 * PTEs automatically in a writable mapping.
 */
#define  MM_CP_TRY_CHANGE_WRITABLE	(1UL << 0)
/* Whether this protection change is for NUMA hints */
#define  MM_CP_PROT_NUMA		(1UL << 1)
/* Whether this change is for write protecting */
#define  MM_CP_UFFD_WP			(1UL << 2) /* do wp */
#define  MM_CP_UFFD_WP_RESOLVE		(1UL << 3) /* Resolve wp */
#define  MM_CP_UFFD_WP_ALL		(MM_CP_UFFD_WP | \
					 MM_CP_UFFD_WP_RESOLVE)

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
			      struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * These variants don't attempt to fault and will return short (i.e. with
 * fewer pages than requested).
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);

static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}
/*
 * per-process(per-mm_struct) statistics.
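 *
 * As an illustrative sketch (not a quote of the fault paths), a caller that
 * instantiates a new anonymous page would account for it with the helpers
 * below roughly like:
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);
 *
 * and undo that with dec_mm_counter(mm, MM_ANONPAGES) when the page is
 * unmapped, so that get_mm_rss() stays accurate.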
2160 */ 2161 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) 2162 { 2163 return percpu_counter_read_positive(&mm->rss_stat[member]); 2164 } 2165 2166 void mm_trace_rss_stat(struct mm_struct *mm, int member); 2167 2168 static inline void add_mm_counter(struct mm_struct *mm, int member, long value) 2169 { 2170 percpu_counter_add(&mm->rss_stat[member], value); 2171 2172 mm_trace_rss_stat(mm, member); 2173 } 2174 2175 static inline void inc_mm_counter(struct mm_struct *mm, int member) 2176 { 2177 percpu_counter_inc(&mm->rss_stat[member]); 2178 2179 mm_trace_rss_stat(mm, member); 2180 } 2181 2182 static inline void dec_mm_counter(struct mm_struct *mm, int member) 2183 { 2184 percpu_counter_dec(&mm->rss_stat[member]); 2185 2186 mm_trace_rss_stat(mm, member); 2187 } 2188 2189 /* Optimized variant when page is already known not to be PageAnon */ 2190 static inline int mm_counter_file(struct page *page) 2191 { 2192 if (PageSwapBacked(page)) 2193 return MM_SHMEMPAGES; 2194 return MM_FILEPAGES; 2195 } 2196 2197 static inline int mm_counter(struct page *page) 2198 { 2199 if (PageAnon(page)) 2200 return MM_ANONPAGES; 2201 return mm_counter_file(page); 2202 } 2203 2204 static inline unsigned long get_mm_rss(struct mm_struct *mm) 2205 { 2206 return get_mm_counter(mm, MM_FILEPAGES) + 2207 get_mm_counter(mm, MM_ANONPAGES) + 2208 get_mm_counter(mm, MM_SHMEMPAGES); 2209 } 2210 2211 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) 2212 { 2213 return max(mm->hiwater_rss, get_mm_rss(mm)); 2214 } 2215 2216 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) 2217 { 2218 return max(mm->hiwater_vm, mm->total_vm); 2219 } 2220 2221 static inline void update_hiwater_rss(struct mm_struct *mm) 2222 { 2223 unsigned long _rss = get_mm_rss(mm); 2224 2225 if ((mm)->hiwater_rss < _rss) 2226 (mm)->hiwater_rss = _rss; 2227 } 2228 2229 static inline void update_hiwater_vm(struct mm_struct *mm) 2230 { 2231 if (mm->hiwater_vm < mm->total_vm) 2232 mm->hiwater_vm = mm->total_vm; 2233 } 2234 2235 static inline void reset_mm_hiwater_rss(struct mm_struct *mm) 2236 { 2237 mm->hiwater_rss = get_mm_rss(mm); 2238 } 2239 2240 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, 2241 struct mm_struct *mm) 2242 { 2243 unsigned long hiwater_rss = get_mm_hiwater_rss(mm); 2244 2245 if (*maxrss < hiwater_rss) 2246 *maxrss = hiwater_rss; 2247 } 2248 2249 #if defined(SPLIT_RSS_COUNTING) 2250 void sync_mm_rss(struct mm_struct *mm); 2251 #else 2252 static inline void sync_mm_rss(struct mm_struct *mm) 2253 { 2254 } 2255 #endif 2256 2257 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL 2258 static inline int pte_special(pte_t pte) 2259 { 2260 return 0; 2261 } 2262 2263 static inline pte_t pte_mkspecial(pte_t pte) 2264 { 2265 return pte; 2266 } 2267 #endif 2268 2269 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP 2270 static inline int pte_devmap(pte_t pte) 2271 { 2272 return 0; 2273 } 2274 #endif 2275 2276 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 2277 spinlock_t **ptl); 2278 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, 2279 spinlock_t **ptl) 2280 { 2281 pte_t *ptep; 2282 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); 2283 return ptep; 2284 } 2285 2286 #ifdef __PAGETABLE_P4D_FOLDED 2287 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, 2288 unsigned long address) 2289 { 2290 return 0; 2291 } 2292 #else 2293 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 2294 #endif 2295 2296 #if 
defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) 2297 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, 2298 unsigned long address) 2299 { 2300 return 0; 2301 } 2302 static inline void mm_inc_nr_puds(struct mm_struct *mm) {} 2303 static inline void mm_dec_nr_puds(struct mm_struct *mm) {} 2304 2305 #else 2306 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); 2307 2308 static inline void mm_inc_nr_puds(struct mm_struct *mm) 2309 { 2310 if (mm_pud_folded(mm)) 2311 return; 2312 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 2313 } 2314 2315 static inline void mm_dec_nr_puds(struct mm_struct *mm) 2316 { 2317 if (mm_pud_folded(mm)) 2318 return; 2319 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 2320 } 2321 #endif 2322 2323 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) 2324 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, 2325 unsigned long address) 2326 { 2327 return 0; 2328 } 2329 2330 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} 2331 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} 2332 2333 #else 2334 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); 2335 2336 static inline void mm_inc_nr_pmds(struct mm_struct *mm) 2337 { 2338 if (mm_pmd_folded(mm)) 2339 return; 2340 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 2341 } 2342 2343 static inline void mm_dec_nr_pmds(struct mm_struct *mm) 2344 { 2345 if (mm_pmd_folded(mm)) 2346 return; 2347 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 2348 } 2349 #endif 2350 2351 #ifdef CONFIG_MMU 2352 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) 2353 { 2354 atomic_long_set(&mm->pgtables_bytes, 0); 2355 } 2356 2357 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) 2358 { 2359 return atomic_long_read(&mm->pgtables_bytes); 2360 } 2361 2362 static inline void mm_inc_nr_ptes(struct mm_struct *mm) 2363 { 2364 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); 2365 } 2366 2367 static inline void mm_dec_nr_ptes(struct mm_struct *mm) 2368 { 2369 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); 2370 } 2371 #else 2372 2373 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {} 2374 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) 2375 { 2376 return 0; 2377 } 2378 2379 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} 2380 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} 2381 #endif 2382 2383 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); 2384 int __pte_alloc_kernel(pmd_t *pmd); 2385 2386 #if defined(CONFIG_MMU) 2387 2388 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, 2389 unsigned long address) 2390 { 2391 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? 2392 NULL : p4d_offset(pgd, address); 2393 } 2394 2395 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, 2396 unsigned long address) 2397 { 2398 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? 2399 NULL : pud_offset(p4d, address); 2400 } 2401 2402 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2403 { 2404 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? 
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0. Make sure nobody started using it in the meantime.
	 *
	 * That can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_lruvec_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_lruvec_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?				\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))?
\ 2520 NULL: pte_offset_kernel(pmd, address)) 2521 2522 #if USE_SPLIT_PMD_PTLOCKS 2523 2524 static inline struct page *pmd_pgtable_page(pmd_t *pmd) 2525 { 2526 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 2527 return virt_to_page((void *)((unsigned long) pmd & mask)); 2528 } 2529 2530 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 2531 { 2532 return ptlock_ptr(pmd_pgtable_page(pmd)); 2533 } 2534 2535 static inline bool pmd_ptlock_init(struct page *page) 2536 { 2537 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2538 page->pmd_huge_pte = NULL; 2539 #endif 2540 return ptlock_init(page); 2541 } 2542 2543 static inline void pmd_ptlock_free(struct page *page) 2544 { 2545 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2546 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 2547 #endif 2548 ptlock_free(page); 2549 } 2550 2551 #define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte) 2552 2553 #else 2554 2555 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 2556 { 2557 return &mm->page_table_lock; 2558 } 2559 2560 static inline bool pmd_ptlock_init(struct page *page) { return true; } 2561 static inline void pmd_ptlock_free(struct page *page) {} 2562 2563 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) 2564 2565 #endif 2566 2567 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) 2568 { 2569 spinlock_t *ptl = pmd_lockptr(mm, pmd); 2570 spin_lock(ptl); 2571 return ptl; 2572 } 2573 2574 static inline bool pgtable_pmd_page_ctor(struct page *page) 2575 { 2576 if (!pmd_ptlock_init(page)) 2577 return false; 2578 __SetPageTable(page); 2579 inc_lruvec_page_state(page, NR_PAGETABLE); 2580 return true; 2581 } 2582 2583 static inline void pgtable_pmd_page_dtor(struct page *page) 2584 { 2585 pmd_ptlock_free(page); 2586 __ClearPageTable(page); 2587 dec_lruvec_page_state(page, NR_PAGETABLE); 2588 } 2589 2590 /* 2591 * No scalability reason to split PUD locks yet, but follow the same pattern 2592 * as the PMD locks to make it easier if we decide to. The VM should not be 2593 * considered ready to switch to split PUD locks yet; there may be places 2594 * which need to be converted from page_table_lock. 2595 */ 2596 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) 2597 { 2598 return &mm->page_table_lock; 2599 } 2600 2601 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) 2602 { 2603 spinlock_t *ptl = pud_lockptr(mm, pud); 2604 2605 spin_lock(ptl); 2606 return ptl; 2607 } 2608 2609 extern void __init pagecache_init(void); 2610 extern void free_initmem(void); 2611 2612 /* 2613 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) 2614 * into the buddy system. The freed pages will be poisoned with pattern 2615 * "poison" if it's within range [0, UCHAR_MAX]. 2616 * Return pages freed into the buddy system. 2617 */ 2618 extern unsigned long free_reserved_area(void *start, void *end, 2619 int poison, const char *s); 2620 2621 extern void adjust_managed_page_count(struct page *page, long count); 2622 extern void mem_init_print_info(void); 2623 2624 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); 2625 2626 /* Free the reserved page into the buddy system, so it gets managed. 
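 *
 * Illustrative sketch only; pfn_start and pfn_end are assumed to describe a
 * reserved region that is no longer needed:
 *
 *	for (pfn = pfn_start; pfn < pfn_end; pfn++)
 *		free_reserved_page(pfn_to_page(pfn));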
*/ 2627 static inline void free_reserved_page(struct page *page) 2628 { 2629 ClearPageReserved(page); 2630 init_page_count(page); 2631 __free_page(page); 2632 adjust_managed_page_count(page, 1); 2633 } 2634 #define free_highmem_page(page) free_reserved_page(page) 2635 2636 static inline void mark_page_reserved(struct page *page) 2637 { 2638 SetPageReserved(page); 2639 adjust_managed_page_count(page, -1); 2640 } 2641 2642 /* 2643 * Default method to free all the __init memory into the buddy system. 2644 * The freed pages will be poisoned with pattern "poison" if it's within 2645 * range [0, UCHAR_MAX]. 2646 * Return pages freed into the buddy system. 2647 */ 2648 static inline unsigned long free_initmem_default(int poison) 2649 { 2650 extern char __init_begin[], __init_end[]; 2651 2652 return free_reserved_area(&__init_begin, &__init_end, 2653 poison, "unused kernel image (initmem)"); 2654 } 2655 2656 static inline unsigned long get_num_physpages(void) 2657 { 2658 int nid; 2659 unsigned long phys_pages = 0; 2660 2661 for_each_online_node(nid) 2662 phys_pages += node_present_pages(nid); 2663 2664 return phys_pages; 2665 } 2666 2667 /* 2668 * Using memblock node mappings, an architecture may initialise its 2669 * zones, allocate the backing mem_map and account for memory holes in an 2670 * architecture independent manner. 2671 * 2672 * An architecture is expected to register range of page frames backed by 2673 * physical memory with memblock_add[_node]() before calling 2674 * free_area_init() passing in the PFN each zone ends at. At a basic 2675 * usage, an architecture is expected to do something like 2676 * 2677 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, 2678 * max_highmem_pfn}; 2679 * for_each_valid_physical_page_range() 2680 * memblock_add_node(base, size, nid, MEMBLOCK_NONE) 2681 * free_area_init(max_zone_pfns); 2682 */ 2683 void free_area_init(unsigned long *max_zone_pfn); 2684 unsigned long node_map_pfn_alignment(void); 2685 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 2686 unsigned long end_pfn); 2687 extern unsigned long absent_pages_in_range(unsigned long start_pfn, 2688 unsigned long end_pfn); 2689 extern void get_pfn_range_for_nid(unsigned int nid, 2690 unsigned long *start_pfn, unsigned long *end_pfn); 2691 2692 #ifndef CONFIG_NUMA 2693 static inline int early_pfn_to_nid(unsigned long pfn) 2694 { 2695 return 0; 2696 } 2697 #else 2698 /* please see mm/page_alloc.c */ 2699 extern int __meminit early_pfn_to_nid(unsigned long pfn); 2700 #endif 2701 2702 extern void set_dma_reserve(unsigned long new_dma_reserve); 2703 extern void memmap_init_range(unsigned long, int, unsigned long, 2704 unsigned long, unsigned long, enum meminit_context, 2705 struct vmem_altmap *, int migratetype); 2706 extern void setup_per_zone_wmarks(void); 2707 extern void calculate_min_free_kbytes(void); 2708 extern int __meminit init_per_zone_wmark_min(void); 2709 extern void mem_init(void); 2710 extern void __init mmap_init(void); 2711 2712 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); 2713 static inline void show_mem(unsigned int flags, nodemask_t *nodemask) 2714 { 2715 __show_mem(flags, nodemask, MAX_NR_ZONES - 1); 2716 } 2717 extern long si_mem_available(void); 2718 extern void si_meminfo(struct sysinfo * val); 2719 extern void si_meminfo_node(struct sysinfo *val, int nid); 2720 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES 2721 extern unsigned long arch_reserved_kernel_pages(void); 2722 #endif 2723 2724 extern 
__printf(3, 4) 2725 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); 2726 2727 extern void setup_per_cpu_pageset(void); 2728 2729 /* page_alloc.c */ 2730 extern int min_free_kbytes; 2731 extern int watermark_boost_factor; 2732 extern int watermark_scale_factor; 2733 extern bool arch_has_descending_max_zone_pfns(void); 2734 2735 /* nommu.c */ 2736 extern atomic_long_t mmap_pages_allocated; 2737 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); 2738 2739 /* interval_tree.c */ 2740 void vma_interval_tree_insert(struct vm_area_struct *node, 2741 struct rb_root_cached *root); 2742 void vma_interval_tree_insert_after(struct vm_area_struct *node, 2743 struct vm_area_struct *prev, 2744 struct rb_root_cached *root); 2745 void vma_interval_tree_remove(struct vm_area_struct *node, 2746 struct rb_root_cached *root); 2747 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, 2748 unsigned long start, unsigned long last); 2749 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, 2750 unsigned long start, unsigned long last); 2751 2752 #define vma_interval_tree_foreach(vma, root, start, last) \ 2753 for (vma = vma_interval_tree_iter_first(root, start, last); \ 2754 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 2755 2756 void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 2757 struct rb_root_cached *root); 2758 void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 2759 struct rb_root_cached *root); 2760 struct anon_vma_chain * 2761 anon_vma_interval_tree_iter_first(struct rb_root_cached *root, 2762 unsigned long start, unsigned long last); 2763 struct anon_vma_chain *anon_vma_interval_tree_iter_next( 2764 struct anon_vma_chain *node, unsigned long start, unsigned long last); 2765 #ifdef CONFIG_DEBUG_VM_RB 2766 void anon_vma_interval_tree_verify(struct anon_vma_chain *node); 2767 #endif 2768 2769 #define anon_vma_interval_tree_foreach(avc, root, start, last) \ 2770 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ 2771 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) 2772 2773 /* mmap.c */ 2774 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); 2775 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, 2776 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, 2777 struct vm_area_struct *expand); 2778 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, 2779 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) 2780 { 2781 return __vma_adjust(vma, start, end, pgoff, insert, NULL); 2782 } 2783 extern struct vm_area_struct *vma_merge(struct mm_struct *, 2784 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 2785 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 2786 struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *); 2787 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 2788 extern int __split_vma(struct mm_struct *, struct vm_area_struct *, 2789 unsigned long addr, int new_below); 2790 extern int split_vma(struct mm_struct *, struct vm_area_struct *, 2791 unsigned long addr, int new_below); 2792 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); 2793 extern void unlink_file_vma(struct vm_area_struct *); 2794 extern struct vm_area_struct *copy_vma(struct vm_area_struct **, 2795 unsigned long addr, unsigned long len, pgoff_t pgoff, 2796 bool 
*need_rmap_locks); 2797 extern void exit_mmap(struct mm_struct *); 2798 2799 void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas); 2800 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas); 2801 2802 static inline int check_data_rlimit(unsigned long rlim, 2803 unsigned long new, 2804 unsigned long start, 2805 unsigned long end_data, 2806 unsigned long start_data) 2807 { 2808 if (rlim < RLIM_INFINITY) { 2809 if (((new - start) + (end_data - start_data)) > rlim) 2810 return -ENOSPC; 2811 } 2812 2813 return 0; 2814 } 2815 2816 extern int mm_take_all_locks(struct mm_struct *mm); 2817 extern void mm_drop_all_locks(struct mm_struct *mm); 2818 2819 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 2820 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 2821 extern struct file *get_mm_exe_file(struct mm_struct *mm); 2822 extern struct file *get_task_exe_file(struct task_struct *task); 2823 2824 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); 2825 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); 2826 2827 extern bool vma_is_special_mapping(const struct vm_area_struct *vma, 2828 const struct vm_special_mapping *sm); 2829 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, 2830 unsigned long addr, unsigned long len, 2831 unsigned long flags, 2832 const struct vm_special_mapping *spec); 2833 /* This is an obsolete alternative to _install_special_mapping. */ 2834 extern int install_special_mapping(struct mm_struct *mm, 2835 unsigned long addr, unsigned long len, 2836 unsigned long flags, struct page **pages); 2837 2838 unsigned long randomize_stack_top(unsigned long stack_top); 2839 unsigned long randomize_page(unsigned long start, unsigned long range); 2840 2841 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 2842 2843 extern unsigned long mmap_region(struct file *file, unsigned long addr, 2844 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2845 struct list_head *uf); 2846 extern unsigned long do_mmap(struct file *file, unsigned long addr, 2847 unsigned long len, unsigned long prot, unsigned long flags, 2848 unsigned long pgoff, unsigned long *populate, struct list_head *uf); 2849 extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm, 2850 unsigned long start, size_t len, struct list_head *uf, 2851 bool downgrade); 2852 extern int do_munmap(struct mm_struct *, unsigned long, size_t, 2853 struct list_head *uf); 2854 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); 2855 2856 #ifdef CONFIG_MMU 2857 extern int __mm_populate(unsigned long addr, unsigned long len, 2858 int ignore_errors); 2859 static inline void mm_populate(unsigned long addr, unsigned long len) 2860 { 2861 /* Ignore errors */ 2862 (void) __mm_populate(addr, len, 1); 2863 } 2864 #else 2865 static inline void mm_populate(unsigned long addr, unsigned long len) {} 2866 #endif 2867 2868 /* These take the mm semaphore themselves */ 2869 extern int __must_check vm_brk(unsigned long, unsigned long); 2870 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); 2871 extern int vm_munmap(unsigned long, size_t); 2872 extern unsigned long __must_check vm_mmap(struct file *, unsigned long, 2873 unsigned long, unsigned long, 2874 unsigned long, unsigned long); 2875 2876 struct vm_unmapped_area_info { 2877 #define 
VM_UNMAPPED_AREA_TOPDOWN 1 2878 unsigned long flags; 2879 unsigned long length; 2880 unsigned long low_limit; 2881 unsigned long high_limit; 2882 unsigned long align_mask; 2883 unsigned long align_offset; 2884 }; 2885 2886 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); 2887 2888 /* truncate.c */ 2889 extern void truncate_inode_pages(struct address_space *, loff_t); 2890 extern void truncate_inode_pages_range(struct address_space *, 2891 loff_t lstart, loff_t lend); 2892 extern void truncate_inode_pages_final(struct address_space *); 2893 2894 /* generic vm_area_ops exported for stackable file systems */ 2895 extern vm_fault_t filemap_fault(struct vm_fault *vmf); 2896 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, 2897 pgoff_t start_pgoff, pgoff_t end_pgoff); 2898 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); 2899 2900 extern unsigned long stack_guard_gap; 2901 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 2902 extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 2903 2904 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ 2905 extern int expand_downwards(struct vm_area_struct *vma, 2906 unsigned long address); 2907 #if VM_GROWSUP 2908 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 2909 #else 2910 #define expand_upwards(vma, address) (0) 2911 #endif 2912 2913 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 2914 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 2915 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, 2916 struct vm_area_struct **pprev); 2917 2918 /* 2919 * Look up the first VMA which intersects the interval [start_addr, end_addr) 2920 * NULL if none. Assume start_addr < end_addr. 2921 */ 2922 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 2923 unsigned long start_addr, unsigned long end_addr); 2924 2925 /** 2926 * vma_lookup() - Find a VMA at a specific address 2927 * @mm: The process address space. 2928 * @addr: The user address. 2929 * 2930 * Return: The vm_area_struct at the given address, %NULL otherwise. 2931 */ 2932 static inline 2933 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) 2934 { 2935 return mtree_load(&mm->mm_mt, addr); 2936 } 2937 2938 static inline unsigned long vm_start_gap(struct vm_area_struct *vma) 2939 { 2940 unsigned long vm_start = vma->vm_start; 2941 2942 if (vma->vm_flags & VM_GROWSDOWN) { 2943 vm_start -= stack_guard_gap; 2944 if (vm_start > vma->vm_start) 2945 vm_start = 0; 2946 } 2947 return vm_start; 2948 } 2949 2950 static inline unsigned long vm_end_gap(struct vm_area_struct *vma) 2951 { 2952 unsigned long vm_end = vma->vm_end; 2953 2954 if (vma->vm_flags & VM_GROWSUP) { 2955 vm_end += stack_guard_gap; 2956 if (vm_end < vma->vm_end) 2957 vm_end = -PAGE_SIZE; 2958 } 2959 return vm_end; 2960 } 2961 2962 static inline unsigned long vma_pages(struct vm_area_struct *vma) 2963 { 2964 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 2965 } 2966 2967 /* Look up the first VMA which exactly match the interval vm_start ... 
vm_end */ 2968 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, 2969 unsigned long vm_start, unsigned long vm_end) 2970 { 2971 struct vm_area_struct *vma = vma_lookup(mm, vm_start); 2972 2973 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) 2974 vma = NULL; 2975 2976 return vma; 2977 } 2978 2979 static inline bool range_in_vma(struct vm_area_struct *vma, 2980 unsigned long start, unsigned long end) 2981 { 2982 return (vma && vma->vm_start <= start && end <= vma->vm_end); 2983 } 2984 2985 #ifdef CONFIG_MMU 2986 pgprot_t vm_get_page_prot(unsigned long vm_flags); 2987 void vma_set_page_prot(struct vm_area_struct *vma); 2988 #else 2989 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 2990 { 2991 return __pgprot(0); 2992 } 2993 static inline void vma_set_page_prot(struct vm_area_struct *vma) 2994 { 2995 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 2996 } 2997 #endif 2998 2999 void vma_set_file(struct vm_area_struct *vma, struct file *file); 3000 3001 #ifdef CONFIG_NUMA_BALANCING 3002 unsigned long change_prot_numa(struct vm_area_struct *vma, 3003 unsigned long start, unsigned long end); 3004 #endif 3005 3006 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 3007 int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 3008 unsigned long pfn, unsigned long size, pgprot_t); 3009 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 3010 unsigned long pfn, unsigned long size, pgprot_t prot); 3011 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 3012 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 3013 struct page **pages, unsigned long *num); 3014 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 3015 unsigned long num); 3016 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 3017 unsigned long num); 3018 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 3019 unsigned long pfn); 3020 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 3021 unsigned long pfn, pgprot_t pgprot); 3022 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 3023 pfn_t pfn); 3024 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, 3025 pfn_t pfn, pgprot_t pgprot); 3026 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 3027 unsigned long addr, pfn_t pfn); 3028 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 3029 3030 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, 3031 unsigned long addr, struct page *page) 3032 { 3033 int err = vm_insert_page(vma, addr, page); 3034 3035 if (err == -ENOMEM) 3036 return VM_FAULT_OOM; 3037 if (err < 0 && err != -EBUSY) 3038 return VM_FAULT_SIGBUS; 3039 3040 return VM_FAULT_NOPAGE; 3041 } 3042 3043 #ifndef io_remap_pfn_range 3044 static inline int io_remap_pfn_range(struct vm_area_struct *vma, 3045 unsigned long addr, unsigned long pfn, 3046 unsigned long size, pgprot_t prot) 3047 { 3048 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); 3049 } 3050 #endif 3051 3052 static inline vm_fault_t vmf_error(int err) 3053 { 3054 if (err == -ENOMEM) 3055 return VM_FAULT_OOM; 3056 return VM_FAULT_SIGBUS; 3057 } 3058 3059 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 3060 unsigned int foll_flags); 3061 3062 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int 
						foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

/*
 * Indicates, for a page that is write-protected in the page table, whether
 * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE so that the GUP pin
 * will remain consistent with the pages mapped into the page tables of the
 * MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   page_try_share_anon_rmap()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fall back to the slow path just to look up the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

/*
 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
 * a (NUMA hinting) fault is required.
 */
static inline bool gup_can_follow_protnone(unsigned int flags)
{
	/*
	 * FOLL_FORCE has to be able to make progress even if the VMA is
	 * inaccessible. Further, FOLL_FORCE access usually does not represent
	 * application behaviour and we should avoid triggering NUMA hinting
	 * faults.
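	 *
	 * An illustrative sketch of the expected caller pattern (not a quote
	 * of the actual GUP code): a PROT_NONE entry is only followed when
	 * this helper allows it, e.g.
	 *
	 *	if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
	 *		goto no_page;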
3149 */ 3150 return flags & FOLL_FORCE; 3151 } 3152 3153 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); 3154 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 3155 unsigned long size, pte_fn_t fn, void *data); 3156 extern int apply_to_existing_page_range(struct mm_struct *mm, 3157 unsigned long address, unsigned long size, 3158 pte_fn_t fn, void *data); 3159 3160 extern void __init init_mem_debugging_and_hardening(void); 3161 #ifdef CONFIG_PAGE_POISONING 3162 extern void __kernel_poison_pages(struct page *page, int numpages); 3163 extern void __kernel_unpoison_pages(struct page *page, int numpages); 3164 extern bool _page_poisoning_enabled_early; 3165 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled); 3166 static inline bool page_poisoning_enabled(void) 3167 { 3168 return _page_poisoning_enabled_early; 3169 } 3170 /* 3171 * For use in fast paths after init_mem_debugging() has run, or when a 3172 * false negative result is not harmful when called too early. 3173 */ 3174 static inline bool page_poisoning_enabled_static(void) 3175 { 3176 return static_branch_unlikely(&_page_poisoning_enabled); 3177 } 3178 static inline void kernel_poison_pages(struct page *page, int numpages) 3179 { 3180 if (page_poisoning_enabled_static()) 3181 __kernel_poison_pages(page, numpages); 3182 } 3183 static inline void kernel_unpoison_pages(struct page *page, int numpages) 3184 { 3185 if (page_poisoning_enabled_static()) 3186 __kernel_unpoison_pages(page, numpages); 3187 } 3188 #else 3189 static inline bool page_poisoning_enabled(void) { return false; } 3190 static inline bool page_poisoning_enabled_static(void) { return false; } 3191 static inline void __kernel_poison_pages(struct page *page, int nunmpages) { } 3192 static inline void kernel_poison_pages(struct page *page, int numpages) { } 3193 static inline void kernel_unpoison_pages(struct page *page, int numpages) { } 3194 #endif 3195 3196 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 3197 static inline bool want_init_on_alloc(gfp_t flags) 3198 { 3199 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, 3200 &init_on_alloc)) 3201 return true; 3202 return flags & __GFP_ZERO; 3203 } 3204 3205 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 3206 static inline bool want_init_on_free(void) 3207 { 3208 return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, 3209 &init_on_free); 3210 } 3211 3212 extern bool _debug_pagealloc_enabled_early; 3213 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); 3214 3215 static inline bool debug_pagealloc_enabled(void) 3216 { 3217 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && 3218 _debug_pagealloc_enabled_early; 3219 } 3220 3221 /* 3222 * For use in fast paths after init_debug_pagealloc() has run, or when a 3223 * false negative result is not harmful when called too early. 
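 *
 * An illustrative fast-path guard (mirroring debug_pagealloc_map_pages()
 * further below; shown here only as a sketch):
 *
 *	if (debug_pagealloc_enabled_static())
 *		__kernel_map_pages(page, numpages, 1);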
3224 */ 3225 static inline bool debug_pagealloc_enabled_static(void) 3226 { 3227 if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) 3228 return false; 3229 3230 return static_branch_unlikely(&_debug_pagealloc_enabled); 3231 } 3232 3233 #ifdef CONFIG_DEBUG_PAGEALLOC 3234 /* 3235 * To support DEBUG_PAGEALLOC architecture must ensure that 3236 * __kernel_map_pages() never fails 3237 */ 3238 extern void __kernel_map_pages(struct page *page, int numpages, int enable); 3239 3240 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) 3241 { 3242 if (debug_pagealloc_enabled_static()) 3243 __kernel_map_pages(page, numpages, 1); 3244 } 3245 3246 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) 3247 { 3248 if (debug_pagealloc_enabled_static()) 3249 __kernel_map_pages(page, numpages, 0); 3250 } 3251 #else /* CONFIG_DEBUG_PAGEALLOC */ 3252 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} 3253 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} 3254 #endif /* CONFIG_DEBUG_PAGEALLOC */ 3255 3256 #ifdef __HAVE_ARCH_GATE_AREA 3257 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 3258 extern int in_gate_area_no_mm(unsigned long addr); 3259 extern int in_gate_area(struct mm_struct *mm, unsigned long addr); 3260 #else 3261 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 3262 { 3263 return NULL; 3264 } 3265 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } 3266 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) 3267 { 3268 return 0; 3269 } 3270 #endif /* __HAVE_ARCH_GATE_AREA */ 3271 3272 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); 3273 3274 #ifdef CONFIG_SYSCTL 3275 extern int sysctl_drop_caches; 3276 int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, 3277 loff_t *); 3278 #endif 3279 3280 void drop_slab(void); 3281 3282 #ifndef CONFIG_MMU 3283 #define randomize_va_space 0 3284 #else 3285 extern int randomize_va_space; 3286 #endif 3287 3288 const char * arch_vma_name(struct vm_area_struct *vma); 3289 #ifdef CONFIG_MMU 3290 void print_vma_addr(char *prefix, unsigned long rip); 3291 #else 3292 static inline void print_vma_addr(char *prefix, unsigned long rip) 3293 { 3294 } 3295 #endif 3296 3297 void *sparse_buffer_alloc(unsigned long size); 3298 struct page * __populate_section_memmap(unsigned long pfn, 3299 unsigned long nr_pages, int nid, struct vmem_altmap *altmap, 3300 struct dev_pagemap *pgmap); 3301 void pmd_init(void *addr); 3302 void pud_init(void *addr); 3303 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 3304 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); 3305 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); 3306 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 3307 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, 3308 struct vmem_altmap *altmap, struct page *reuse); 3309 void *vmemmap_alloc_block(unsigned long size, int node); 3310 struct vmem_altmap; 3311 void *vmemmap_alloc_block_buf(unsigned long size, int node, 3312 struct vmem_altmap *altmap); 3313 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 3314 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node, 3315 unsigned long addr, unsigned long next); 3316 int vmemmap_check_pmd(pmd_t *pmd, int node, 3317 unsigned long addr, unsigned long next); 3318 int vmemmap_populate_basepages(unsigned long 
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
	MF_UNPOISON = 1 << 4,
	MF_SW_SIMULATED = 1 << 5,
	MF_NO_RETRY = 1 << 6,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		      unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared);
void num_poisoned_pages_inc(unsigned long pfn);
void num_poisoned_pages_sub(unsigned long pfn, long i);
#else
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
}

static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void num_poisoned_pages_inc(unsigned long pfn)
{
}

static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
extern void memblk_nr_poison_inc(unsigned long pfn);
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
#else
static inline void memblk_nr_poison_inc(unsigned long pfn)
{
}

static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
{
}
#endif

#ifndef arch_memory_failure
static inline int arch_memory_failure(unsigned long pfn, int flags)
{
	return -ENXIO;
}
#endif

#ifndef arch_is_platform_page
static inline bool arch_is_platform_page(u64 paddr)
{
	return false;
}
#endif
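/*
 * Purely illustrative sketch (not part of the kernel API): how an error
 * handler might report a corrupted page.  memory_failure() may sleep, so
 * callers in atomic context (e.g. a machine-check handler) queue the pfn
 * instead and let memory_failure_queue_kick() drain it later.  The function
 * name and the in_atomic_ctx parameter are hypothetical.
 */
static inline void report_poisoned_pfn_sketch(unsigned long pfn,
					      bool in_atomic_ctx)
{
	if (in_atomic_ctx)
		/* Defer handling to process context. */
		memory_failure_queue(pfn, MF_ACTION_REQUIRED);
	else
		/* Handle synchronously; may kill processes mapping the page. */
		memory_failure(pfn, MF_ACTION_REQUIRED | MF_MUST_KILL);
}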
/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);

/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
	return vma_is_dax(vma) || (vma->vm_file &&
				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif	/* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}
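/*
 * Purely illustrative sketch (not part of the kernel API): pages_identical()
 * is the kind of byte-for-byte comparison used when deduplicating pages with
 * identical contents (e.g. KSM-style merging).  The helper name below is
 * hypothetical.
 */
static inline bool pages_can_merge_sketch(struct page *a, struct page *b)
{
	/* memcmp_pages() maps both pages and returns non-zero on any mismatch. */
	return a != b && pages_identical(a, b);
}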
#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;

#ifdef CONFIG_PRINTK
void mem_dump_obj(void *object);
#else
static inline void mem_dump_obj(void *object) {}
#endif

/**
 * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
 * the vma flags.  Return 0 if the check passes, or <0 on error.
 */
static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
{
	if (seals & F_SEAL_FUTURE_WRITE) {
		/*
		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
		 * the "future write" seal is active.
		 */
		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
			return -EPERM;

		/*
		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
		 * MAP_SHARED and read-only, take care to not allow mprotect to
		 * revert protections on such mappings.  Do this only for shared
		 * mappings.  For private mappings, there is no need to mask
		 * VM_MAYWRITE as we still want them to be COW-writable.
		 */
		if (vma->vm_flags & VM_SHARED)
			vma->vm_flags &= ~(VM_MAYWRITE);
	}

	return 0;
}

#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in,
			  struct anon_vma_name *anon_name);
#else
static inline int
madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
		      unsigned long len_in, struct anon_vma_name *anon_name)
{
	return 0;
}
#endif

#endif /* _LINUX_MM_H */