/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/processor.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 * It is defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif
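/*
 * Illustrative sketch (not part of the kernel API surface): on architectures
 * with memory tagging, user pointers may carry a tag in their top bits, so
 * code that looks up or compares raw addresses is expected to strip the tag
 * first via untagged_addr() above. A hypothetical helper might look like:
 *
 *	static struct vm_area_struct *find_vma_untagged(struct mm_struct *mm,
 *							unsigned long addr)
 *	{
 *		return find_vma(mm, untagged_addr(addr));
 *	}
 *
 * On architectures without memory tagging untagged_addr() is the identity
 * macro defined above, so the helper collapses to a plain find_vma() call.
 */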
/*
 * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
 * instrumented C code with jump table addresses. Architectures that
 * support CFI can define this macro to return the actual function address
 * when needed.
 */
#ifndef function_nocfi
#define function_nocfi(x) (x)
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * If an architecture decides to implement their own version of
 * mm_zero_struct_page they should wrap the defines below in a #ifndef and
 * define their own version of this macro in <asm/pgtable.h>
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 80
 * or reduces below 56. The idea is that the compiler optimizes out the
 * switch() statement and leaves only move/store instructions. The compiler
 * can also combine write statements if they are both assignments and can be
 * reordered, which can result in several of the writes here being dropped.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	/* Check that struct page is either 56, 64, 72, or 80 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 80);

	switch (sizeof(struct page)) {
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to the image of the
 * program when generating a coredump, we need some margin. The number of
 * extra sections is currently 1-3 and depends on the arch. We use "5" as a
 * safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is not a hard limit any more, although some userspace tools can be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
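/*
 * Worked example (illustrative): with USHRT_MAX == 65535 and the margin of 5
 * reserved for the extra sections the kernel emits into a coredump,
 * DEFAULT_MAX_MAP_COUNT evaluates to 65530. This is the value that normally
 * shows up as the default of the vm.max_map_count sysctl unless an
 * administrator has overridden it.
 */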
extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

/*
 * Any attempt to mark this function as static leads to build failure
 * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
 * is referred to by BPF code. This must be visible for error injection.
 */
int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp, void **shadowp);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64 */
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
# define VM_ARCH_CLEAR	VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_0	/* Use Tagged memory for access control */
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_1	/* Tagged memory permitted */
#else
# define VM_MTE		VM_NONE
# define VM_MTE_ALLOWED	VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define VM_UFFD_MINOR_BIT	37
# define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR		VM_NONE
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)


/*
 * Special vmas that are non-mergeable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask prevents a VMA from being scanned by khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];

/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
 * whether we would allow page faults to retry by specifying these two
 * fault flags correctly. Currently there can be three legal combinations:
 *
 * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
 *                              this is the first try
 *
 * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
 *                              we've already tried at least once
 *
 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
 *
 * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
 * be used.  Note that page faults can be allowed to retry multiple times, in
 * which case we'll have an initial fault with flags (a) then later on
 * continuous faults with flags (b).  We should always try to detect pending
 * signals before a retry to make sure the continuous page faults can still be
 * interrupted if necessary.
 *
 * A simplified example of the resulting retry loop is sketched below, after
 * FAULT_FLAG_TRACE.
 */
enum fault_flag {
	FAULT_FLAG_WRITE =		1 << 0,
	FAULT_FLAG_MKWRITE =		1 << 1,
	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
	FAULT_FLAG_KILLABLE =		1 << 4,
	FAULT_FLAG_TRIED =		1 << 5,
	FAULT_FLAG_USER =		1 << 6,
	FAULT_FLAG_REMOTE =		1 << 7,
	FAULT_FLAG_INSTRUCTION =	1 << 8,
	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
};

/*
 * The default fault flags that should be used by most of the
 * arch-specific page fault handlers.
 */
#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
			     FAULT_FLAG_KILLABLE | \
			     FAULT_FLAG_INTERRUPTIBLE)

/**
 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 * @flags: Fault flags.
 *
 * This is mostly used for places where we want to try to avoid taking
 * the mmap_lock for too long a time when waiting for another condition
 * to change, in which case we can try to be polite to release the
 * mmap_lock in the first round to avoid potential starvation of other
 * processes that would also want the mmap_lock.
 *
 * Return: true if the page fault allows retry and this is the first
 * attempt of the fault handling; false otherwise.
 */
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
}

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }
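/*
 * Illustrative sketch only (simplified, not taken from any particular
 * architecture): this is roughly how an arch page fault handler is expected
 * to combine FAULT_FLAG_DEFAULT, the ALLOW_RETRY/TRIED protocol described
 * above and handle_mm_fault() (declared later in this header). Error
 * handling and signal checks are omitted, and is_write_fault stands in for
 * the arch-specific way of detecting a write access.
 *
 *	unsigned int flags = FAULT_FLAG_DEFAULT;
 *	vm_fault_t fault;
 *
 *	if (user_mode(regs))
 *		flags |= FAULT_FLAG_USER;
 *	if (is_write_fault)
 *		flags |= FAULT_FLAG_WRITE;
 *
 *	mmap_read_lock(mm);
 * retry:
 *	vma = find_vma(mm, address);
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		// combination (a) became (b): mark the fault as tried and
 *		// take the mmap_lock again before retrying.
 *		flags |= FAULT_FLAG_TRIED;
 *		mmap_read_lock(mm);
 *		goto retry;
 *	}
 *	mmap_read_unlock(mm);
 *
 * Note: when handle_mm_fault() returns VM_FAULT_RETRY it has already dropped
 * the mmap_lock (provided FAULT_FLAG_ALLOW_RETRY was set), which is why the
 * lock is re-taken before the retry.
 */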
/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	const struct {
		struct vm_area_struct *vma;	/* Target VMA */
		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address */
	};
	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
					 * XXX: should really be 'const' */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() sets up a page
					 * table from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr. This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;

	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

#ifdef CONFIG_SHMEM
/*
 * vma_is_shmem() is not inlined because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
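/*
 * Illustrative usage sketch (not a kernel API addition): the kv*() helpers
 * above try a kmalloc() first and transparently fall back to vmalloc() for
 * larger or fragmented allocations, so the result must always be released
 * with kvfree() rather than kfree(). The struct name and count below are
 * made up for the example.
 *
 *	struct foo_entry *table;
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	// ... use table ...
 *	kvfree(table);
 *
 * kvcalloc()/kvzalloc() behave the same but return zeroed memory, and
 * kvmalloc_array() additionally guards the size multiplication against
 * overflow via check_mul_overflow().
 */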
static inline int head_compound_mapcount(struct page *head)
{
	return atomic_read(compound_mapcount_ptr(head)) + 1;
}

/*
 * Mapcount of compound page as a whole, does not include mapped sub-pages.
 *
 * Must be called only for compound pages or any of their tail sub-pages.
 */
static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return head_compound_mapcount(page);
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

/*
 * Mapcount of 0-order page; when compound sub-page, includes
 * compound_mapcount().
 *
 * Result is undefined for pages which cannot be mapped into userspace.
 * For example SLAB or special types of pages. See function page_has_type().
 * They use this place in struct page differently.
 */
static inline int page_mapcount(struct page *page)
{
	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline void destroy_compound_page(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	compound_page_dtors[page[1].compound_dtor](page);
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}
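/*
 * Worked example (illustrative): for an order-2 compound page on a
 * configuration with 4KB pages, compound_order() on the head returns 2,
 * compound_nr() (defined below) returns 1 << 2 = 4 constituent pages, and
 * page_size() returns PAGE_SIZE << 2 = 16KB. For a non-compound page the
 * three collapse to 0, 1 and PAGE_SIZE respectively.
 */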
static inline bool hpage_pincount_available(struct page *page)
{
	/*
	 * Can the page->hpage_pinned_refcount field be used? That field is in
	 * the 3rd page of the compound page, so the smallest (2-page) compound
	 * pages cannot support it.
	 */
	page = compound_head(page);
	return PageCompound(page) && compound_order(page) > 1;
}

static inline int head_compound_pincount(struct page *head)
{
	return atomic_read(compound_pincount_ptr(head));
}

static inline int compound_pincount(struct page *page)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	page = compound_head(page);
	return head_compound_pincount(page);
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
	page[1].compound_nr = 1U << order;
}

/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
	if (!PageHead(page))
		return 1;
	return page[1].compound_nr;
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we do always want
 * pte_mkwrite.  But get_user_pages() can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm().
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
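/*
 * Illustrative sketch (simplified from the generic fault path, not a new
 * interface): maybe_mkwrite() lets a fault handler construct the most
 * permissive PTE the VMA allows without special-casing mappings that are
 * written through get_user_pages()/access_process_vm(). A typical use when
 * installing a freshly allocated anonymous page looks roughly like:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *
 *	if (write_fault)
 *		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * where write_fault, address and ptep stand in for state the real handler
 * carries in struct vm_fault.
 */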
/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away references to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
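/*
 * Worked example (illustrative, the widths are config-dependent): on a
 * 64-bit build with SECTIONS_WIDTH == 0, NODES_WIDTH == 10 and
 * ZONES_WIDTH == 3, the node id occupies bits 54..63 of page->flags and the
 * zone occupies bits 51..53, so a lookup such as page_zonenum() below
 * reduces to (page->flags >> 51) & 0x7. The PGSHIFT and MASK macros exist
 * so this arithmetic never has to be open-coded.
 */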
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);

static inline bool page_is_devmap_managed(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		return true;
	default:
		break;
	}
	return false;
}

void put_devmap_managed_page(struct page *page);

#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool page_is_devmap_managed(struct page *page)
{
	return false;
}

static inline void put_devmap_managed_page(struct page *page)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

bool __must_check try_grab_page(struct page *page, unsigned int flags);
__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
						   unsigned int flags);


static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch refcount transition from
	 * 2 to 1: when the refcount reaches one it means the page is free and
	 * we need to inform the device driver through a callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (page_is_devmap_managed(page)) {
		put_devmap_managed_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}

/*
 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
 * the page's refcount so that two separate items are tracked: the original page
 * reference count, and also a new count of how many pin_user_pages() calls were
 * made against the page. ("gup-pinned" is another term for the latter).
 *
 * With this scheme, pin_user_pages() becomes special: such pages are marked as
 * distinct from normal pages. As such, the unpin_user_page() call (and its
 * variants) must be used in order to release gup-pinned pages.
 *
 * Choice of value:
 *
 * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
 * counts with respect to pin_user_pages() and unpin_user_page() becomes
 * simpler, due to the fact that adding an even power of two to the page
 * refcount has the effect of using only the upper N bits, for the code that
 * counts up using the bias value. This means that the lower bits are left for
 * the exclusive use of the original code that increments and decrements by one
 * (or at least, by much smaller values than the bias value).
 *
 * Of course, once the lower bits overflow into the upper bits (and this is
 * OK, because subtraction recovers the original values), then visual inspection
 * no longer suffices to directly view the separate counts. However, for normal
 * applications that don't have huge page reference counts, this won't be an
 * issue.
 *
 * Locking: the lockless algorithm described in page_cache_get_speculative()
 * and page_cache_gup_pin_speculative() provides safe operation for
 * get_user_pages and page_mkclean and other calls that race to set up page
 * table entries.
 */
#define GUP_PIN_COUNTING_BIAS (1U << 10)

void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
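/*
 * Illustrative usage sketch (simplified, error handling omitted; see
 * Documentation/core-api/pin_user_pages.rst for the authoritative rules):
 * a driver that wants to DMA into user memory pins the pages and later
 * releases them with an unpin_user_*() variant rather than put_page().
 * NR and user_addr are placeholders for the example.
 *
 *	struct page *pages[NR];
 *	int pinned;
 *
 *	pinned = pin_user_pages_fast(user_addr, NR,
 *				     FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	// ... set up and run the DMA against pages[0..pinned-1] ...
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 *
 * Passing true for make_dirty marks the pages dirty while unpinning, which
 * is what a device that wrote to the memory normally wants.
 */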
/**
 * page_maybe_dma_pinned - Report if a page is pinned for DMA.
 * @page: The page.
 *
 * This function checks if a page has been pinned via a call to
 * a function in the pin_user_pages() family.
 *
 * For non-huge pages, the return value is partially fuzzy: false is not fuzzy,
 * because it means "definitely not pinned for DMA", but true means "probably
 * pinned for DMA, but possibly a false positive due to having at least
 * GUP_PIN_COUNTING_BIAS worth of normal page references".
 *
 * False positives are OK, because: a) it's unlikely for a page to get that many
 * refcounts, and b) all the callers of this routine are expected to be able to
 * deal gracefully with a false positive.
 *
 * For huge pages, the result will be exactly correct. That's because we have
 * more tracking data available: the 3rd struct page in the compound page is
 * used to track the pincount (instead of using the GUP_PIN_COUNTING_BIAS
 * scheme).
 *
 * For more information, please see Documentation/core-api/pin_user_pages.rst.
 *
 * Return: True, if it is likely that the page has been "dma-pinned".
 * False, if the page is definitely not dma-pinned.
 */
static inline bool page_maybe_dma_pinned(struct page *page)
{
	if (hpage_pincount_available(page))
		return compound_pincount(page) > 0;

	/*
	 * page_ref_count() is signed. If that refcount overflows, then
	 * page_ref_count() returns a negative value, and callers will avoid
	 * further incrementing the refcount.
	 *
	 * Here, for that overflow case, use the signed bit to count a little
	 * bit higher via unsigned math, and thus still get an accurate result.
	 */
	return ((unsigned int)page_ref_count(compound_head(page))) >=
		GUP_PIN_COUNTING_BIAS;
}

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * This should most likely only be called during fork() to see whether we
 * should break the CoW immediately for a page on the src mm.
 */
static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
					  struct page *page)
{
	if (!is_cow_mapping(vma->vm_flags))
		return false;

	if (!atomic_read(&vma->vm_mm->has_pinned))
		return false;

	return page_maybe_dma_pinned(page);
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
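/*
 * Illustrative note (a simplified rendering of the fork() copy path, not new
 * API): is_cow_mapping() identifies MAP_PRIVATE mappings that could still be
 * made writable (VM_MAYWRITE set, VM_SHARED clear). During fork(), the
 * copy_page_range() path uses page_needs_cow_for_dma() roughly like:
 *
 *	if (page_needs_cow_for_dma(src_vma, page)) {
 *		// The page may be DMA-pinned in the parent, so it cannot be
 *		// shared write-protected with the child; copy it right now
 *		// instead of deferring the CoW to a later write fault.
 *	}
 *
 * Otherwise both processes keep the same physical page and the copy happens
 * lazily on the first write fault.
 */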
/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/*
 * KASAN per-page tags are stored xor'ed with 0xff.
 * This avoids setting tags for all pages to the native kernel tag value 0xff,
 * as the default value 0x00 maps to 0xff.
 */

static inline u8 page_kasan_tag(const struct page *page)
{
	u8 tag = 0xff;

	if (kasan_enabled()) {
		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
		tag ^= 0xff;
	}

	return tag;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	if (kasan_enabled()) {
		tag ^= 0xff;
		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
	}
}

static inline void page_kasan_tag_reset(struct page *page)
{
	if (kasan_enabled())
		page_kasan_tag_set(page, 0xff);
}

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
#ifdef CONFIG_MIGRATION
static inline bool is_pinnable_page(struct page *page)
{
	return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
		is_zero_pfn(page_to_pfn(page));
}
#else
static inline bool is_pinnable_page(struct page *page)
{
	return true;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(const struct page *page)
{
	/*
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves. Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 */
	return (uintptr_t)page->lru.next & BIT(1);
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->lru.next = (void *)BIT(1);
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->lru.next = NULL;
}

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
1718 */ 1719 struct zap_details { 1720 struct address_space *check_mapping; /* Check page->mapping if set */ 1721 pgoff_t first_index; /* Lowest page->index to unmap */ 1722 pgoff_t last_index; /* Highest page->index to unmap */ 1723 struct page *single_page; /* Locked page to be unmapped */ 1724 }; 1725 1726 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1727 pte_t pte); 1728 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, 1729 pmd_t pmd); 1730 1731 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1732 unsigned long size); 1733 void zap_page_range(struct vm_area_struct *vma, unsigned long address, 1734 unsigned long size); 1735 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, 1736 unsigned long start, unsigned long end); 1737 1738 struct mmu_notifier_range; 1739 1740 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, 1741 unsigned long end, unsigned long floor, unsigned long ceiling); 1742 int 1743 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); 1744 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, 1745 struct mmu_notifier_range *range, pte_t **ptepp, 1746 pmd_t **pmdpp, spinlock_t **ptlp); 1747 int follow_pte(struct mm_struct *mm, unsigned long address, 1748 pte_t **ptepp, spinlock_t **ptlp); 1749 int follow_pfn(struct vm_area_struct *vma, unsigned long address, 1750 unsigned long *pfn); 1751 int follow_phys(struct vm_area_struct *vma, unsigned long address, 1752 unsigned int flags, unsigned long *prot, resource_size_t *phys); 1753 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 1754 void *buf, int len, int write); 1755 1756 extern void truncate_pagecache(struct inode *inode, loff_t new); 1757 extern void truncate_setsize(struct inode *inode, loff_t newsize); 1758 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); 1759 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 1760 int truncate_inode_page(struct address_space *mapping, struct page *page); 1761 int generic_error_remove_page(struct address_space *mapping, struct page *page); 1762 int invalidate_inode_page(struct page *page); 1763 1764 #ifdef CONFIG_MMU 1765 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, 1766 unsigned long address, unsigned int flags, 1767 struct pt_regs *regs); 1768 extern int fixup_user_fault(struct mm_struct *mm, 1769 unsigned long address, unsigned int fault_flags, 1770 bool *unlocked); 1771 void unmap_mapping_page(struct page *page); 1772 void unmap_mapping_pages(struct address_space *mapping, 1773 pgoff_t start, pgoff_t nr, bool even_cows); 1774 void unmap_mapping_range(struct address_space *mapping, 1775 loff_t const holebegin, loff_t const holelen, int even_cows); 1776 #else 1777 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, 1778 unsigned long address, unsigned int flags, 1779 struct pt_regs *regs) 1780 { 1781 /* should never happen if there's no MMU */ 1782 BUG(); 1783 return VM_FAULT_SIGBUS; 1784 } 1785 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, 1786 unsigned int fault_flags, bool *unlocked) 1787 { 1788 /* should never happen if there's no MMU */ 1789 BUG(); 1790 return -EFAULT; 1791 } 1792 static inline void unmap_mapping_page(struct page *page) { } 1793 static inline void unmap_mapping_pages(struct address_space *mapping, 1794 pgoff_t start, pgoff_t nr, bool even_cows) { } 1795 
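/* Without an MMU there are no userspace mappings to tear down, so the unmap_mapping_*() stubs are intentionally empty. */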
static inline void unmap_mapping_range(struct address_space *mapping, 1796 loff_t const holebegin, loff_t const holelen, int even_cows) { } 1797 #endif 1798 1799 static inline void unmap_shared_mapping_range(struct address_space *mapping, 1800 loff_t const holebegin, loff_t const holelen) 1801 { 1802 unmap_mapping_range(mapping, holebegin, holelen, 0); 1803 } 1804 1805 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, 1806 void *buf, int len, unsigned int gup_flags); 1807 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1808 void *buf, int len, unsigned int gup_flags); 1809 extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 1810 void *buf, int len, unsigned int gup_flags); 1811 1812 long get_user_pages_remote(struct mm_struct *mm, 1813 unsigned long start, unsigned long nr_pages, 1814 unsigned int gup_flags, struct page **pages, 1815 struct vm_area_struct **vmas, int *locked); 1816 long pin_user_pages_remote(struct mm_struct *mm, 1817 unsigned long start, unsigned long nr_pages, 1818 unsigned int gup_flags, struct page **pages, 1819 struct vm_area_struct **vmas, int *locked); 1820 long get_user_pages(unsigned long start, unsigned long nr_pages, 1821 unsigned int gup_flags, struct page **pages, 1822 struct vm_area_struct **vmas); 1823 long pin_user_pages(unsigned long start, unsigned long nr_pages, 1824 unsigned int gup_flags, struct page **pages, 1825 struct vm_area_struct **vmas); 1826 long get_user_pages_locked(unsigned long start, unsigned long nr_pages, 1827 unsigned int gup_flags, struct page **pages, int *locked); 1828 long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, 1829 unsigned int gup_flags, struct page **pages, int *locked); 1830 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 1831 struct page **pages, unsigned int gup_flags); 1832 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 1833 struct page **pages, unsigned int gup_flags); 1834 1835 int get_user_pages_fast(unsigned long start, int nr_pages, 1836 unsigned int gup_flags, struct page **pages); 1837 int pin_user_pages_fast(unsigned long start, int nr_pages, 1838 unsigned int gup_flags, struct page **pages); 1839 1840 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); 1841 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, 1842 struct task_struct *task, bool bypass_rlim); 1843 1844 struct kvec; 1845 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, 1846 struct page **pages); 1847 int get_kernel_page(unsigned long start, int write, struct page **pages); 1848 struct page *get_dump_page(unsigned long addr); 1849 1850 extern int try_to_release_page(struct page * page, gfp_t gfp_mask); 1851 extern void do_invalidatepage(struct page *page, unsigned int offset, 1852 unsigned int length); 1853 1854 void __set_page_dirty(struct page *, struct address_space *, int warn); 1855 int __set_page_dirty_nobuffers(struct page *page); 1856 int __set_page_dirty_no_writeback(struct page *page); 1857 int redirty_page_for_writepage(struct writeback_control *wbc, 1858 struct page *page); 1859 void account_page_dirtied(struct page *page, struct address_space *mapping); 1860 void account_page_cleaned(struct page *page, struct address_space *mapping, 1861 struct bdi_writeback *wb); 1862 int set_page_dirty(struct page *page); 1863 int set_page_dirty_lock(struct page *page); 1864 void __cancel_dirty_page(struct page *page); 1865 static inline void 
cancel_dirty_page(struct page *page)
1866 {
1867 /* Avoid atomic ops, locking, etc. when not actually needed. */
1868 if (PageDirty(page))
1869 __cancel_dirty_page(page);
1870 }
1871 int clear_page_dirty_for_io(struct page *page);
1872
1873 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1874
1875 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1876 unsigned long old_addr, struct vm_area_struct *new_vma,
1877 unsigned long new_addr, unsigned long len,
1878 bool need_rmap_locks);
1879
1880 /*
1881 * Flags used by change_protection(). For now we make it a bitmap so
1882 * that we can pass in multiple flags just like parameters. However,
1883 * for now all callers only use one of the flags at a
1884 * time.
1885 */
1886 /* Whether we should allow dirty bit accounting */
1887 #define MM_CP_DIRTY_ACCT (1UL << 0)
1888 /* Whether this protection change is for NUMA hints */
1889 #define MM_CP_PROT_NUMA (1UL << 1)
1890 /* Whether this change is for write protecting */
1891 #define MM_CP_UFFD_WP (1UL << 2) /* do wp */
1892 #define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
1893 #define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
1894 MM_CP_UFFD_WP_RESOLVE)
1895
1896 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1897 unsigned long end, pgprot_t newprot,
1898 unsigned long cp_flags);
1899 extern int mprotect_fixup(struct vm_area_struct *vma,
1900 struct vm_area_struct **pprev, unsigned long start,
1901 unsigned long end, unsigned long newflags);
1902
1903 /*
1904 * These don't attempt to fault pages in and may return short counts.
1905 */
1906 int get_user_pages_fast_only(unsigned long start, int nr_pages,
1907 unsigned int gup_flags, struct page **pages);
1908 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
1909 unsigned int gup_flags, struct page **pages);
1910
1911 static inline bool get_user_page_fast_only(unsigned long addr,
1912 unsigned int gup_flags, struct page **pagep)
1913 {
1914 return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
1915 }
1916 /*
1917 * per-process (per-mm_struct) statistics.
1918 */
1919 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1920 {
1921 long val = atomic_long_read(&mm->rss_stat.count[member]);
1922
1923 #ifdef SPLIT_RSS_COUNTING
1924 /*
1925 * The counter is updated asynchronously and may transiently go negative,
1926 * but a negative value is never what callers expect to see.
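* For example, a reader may briefly observe -2 here while another thread's
* cached deltas have not yet been folded back by sync_mm_rss(); in that
* case get_mm_counter() simply reports 0.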
1927 */ 1928 if (val < 0) 1929 val = 0; 1930 #endif 1931 return (unsigned long)val; 1932 } 1933 1934 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count); 1935 1936 static inline void add_mm_counter(struct mm_struct *mm, int member, long value) 1937 { 1938 long count = atomic_long_add_return(value, &mm->rss_stat.count[member]); 1939 1940 mm_trace_rss_stat(mm, member, count); 1941 } 1942 1943 static inline void inc_mm_counter(struct mm_struct *mm, int member) 1944 { 1945 long count = atomic_long_inc_return(&mm->rss_stat.count[member]); 1946 1947 mm_trace_rss_stat(mm, member, count); 1948 } 1949 1950 static inline void dec_mm_counter(struct mm_struct *mm, int member) 1951 { 1952 long count = atomic_long_dec_return(&mm->rss_stat.count[member]); 1953 1954 mm_trace_rss_stat(mm, member, count); 1955 } 1956 1957 /* Optimized variant when page is already known not to be PageAnon */ 1958 static inline int mm_counter_file(struct page *page) 1959 { 1960 if (PageSwapBacked(page)) 1961 return MM_SHMEMPAGES; 1962 return MM_FILEPAGES; 1963 } 1964 1965 static inline int mm_counter(struct page *page) 1966 { 1967 if (PageAnon(page)) 1968 return MM_ANONPAGES; 1969 return mm_counter_file(page); 1970 } 1971 1972 static inline unsigned long get_mm_rss(struct mm_struct *mm) 1973 { 1974 return get_mm_counter(mm, MM_FILEPAGES) + 1975 get_mm_counter(mm, MM_ANONPAGES) + 1976 get_mm_counter(mm, MM_SHMEMPAGES); 1977 } 1978 1979 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) 1980 { 1981 return max(mm->hiwater_rss, get_mm_rss(mm)); 1982 } 1983 1984 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) 1985 { 1986 return max(mm->hiwater_vm, mm->total_vm); 1987 } 1988 1989 static inline void update_hiwater_rss(struct mm_struct *mm) 1990 { 1991 unsigned long _rss = get_mm_rss(mm); 1992 1993 if ((mm)->hiwater_rss < _rss) 1994 (mm)->hiwater_rss = _rss; 1995 } 1996 1997 static inline void update_hiwater_vm(struct mm_struct *mm) 1998 { 1999 if (mm->hiwater_vm < mm->total_vm) 2000 mm->hiwater_vm = mm->total_vm; 2001 } 2002 2003 static inline void reset_mm_hiwater_rss(struct mm_struct *mm) 2004 { 2005 mm->hiwater_rss = get_mm_rss(mm); 2006 } 2007 2008 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, 2009 struct mm_struct *mm) 2010 { 2011 unsigned long hiwater_rss = get_mm_hiwater_rss(mm); 2012 2013 if (*maxrss < hiwater_rss) 2014 *maxrss = hiwater_rss; 2015 } 2016 2017 #if defined(SPLIT_RSS_COUNTING) 2018 void sync_mm_rss(struct mm_struct *mm); 2019 #else 2020 static inline void sync_mm_rss(struct mm_struct *mm) 2021 { 2022 } 2023 #endif 2024 2025 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL 2026 static inline int pte_special(pte_t pte) 2027 { 2028 return 0; 2029 } 2030 2031 static inline pte_t pte_mkspecial(pte_t pte) 2032 { 2033 return pte; 2034 } 2035 #endif 2036 2037 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP 2038 static inline int pte_devmap(pte_t pte) 2039 { 2040 return 0; 2041 } 2042 #endif 2043 2044 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); 2045 2046 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 2047 spinlock_t **ptl); 2048 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, 2049 spinlock_t **ptl) 2050 { 2051 pte_t *ptep; 2052 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); 2053 return ptep; 2054 } 2055 2056 #ifdef __PAGETABLE_P4D_FOLDED 2057 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, 2058 unsigned long address) 2059 { 2060 return 0; 2061 } 2062 #else 
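/* p4d level is not folded here, so a missing entry may genuinely require allocating a new table. */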
2063 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 2064 #endif 2065 2066 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) 2067 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, 2068 unsigned long address) 2069 { 2070 return 0; 2071 } 2072 static inline void mm_inc_nr_puds(struct mm_struct *mm) {} 2073 static inline void mm_dec_nr_puds(struct mm_struct *mm) {} 2074 2075 #else 2076 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); 2077 2078 static inline void mm_inc_nr_puds(struct mm_struct *mm) 2079 { 2080 if (mm_pud_folded(mm)) 2081 return; 2082 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 2083 } 2084 2085 static inline void mm_dec_nr_puds(struct mm_struct *mm) 2086 { 2087 if (mm_pud_folded(mm)) 2088 return; 2089 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 2090 } 2091 #endif 2092 2093 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) 2094 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, 2095 unsigned long address) 2096 { 2097 return 0; 2098 } 2099 2100 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} 2101 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} 2102 2103 #else 2104 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); 2105 2106 static inline void mm_inc_nr_pmds(struct mm_struct *mm) 2107 { 2108 if (mm_pmd_folded(mm)) 2109 return; 2110 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 2111 } 2112 2113 static inline void mm_dec_nr_pmds(struct mm_struct *mm) 2114 { 2115 if (mm_pmd_folded(mm)) 2116 return; 2117 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 2118 } 2119 #endif 2120 2121 #ifdef CONFIG_MMU 2122 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) 2123 { 2124 atomic_long_set(&mm->pgtables_bytes, 0); 2125 } 2126 2127 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) 2128 { 2129 return atomic_long_read(&mm->pgtables_bytes); 2130 } 2131 2132 static inline void mm_inc_nr_ptes(struct mm_struct *mm) 2133 { 2134 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); 2135 } 2136 2137 static inline void mm_dec_nr_ptes(struct mm_struct *mm) 2138 { 2139 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); 2140 } 2141 #else 2142 2143 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {} 2144 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) 2145 { 2146 return 0; 2147 } 2148 2149 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} 2150 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} 2151 #endif 2152 2153 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); 2154 int __pte_alloc_kernel(pmd_t *pmd); 2155 2156 #if defined(CONFIG_MMU) 2157 2158 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, 2159 unsigned long address) 2160 { 2161 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? 2162 NULL : p4d_offset(pgd, address); 2163 } 2164 2165 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, 2166 unsigned long address) 2167 { 2168 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? 2169 NULL : pud_offset(p4d, address); 2170 } 2171 2172 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2173 { 2174 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? 
2175 NULL: pmd_offset(pud, address); 2176 } 2177 #endif /* CONFIG_MMU */ 2178 2179 #if USE_SPLIT_PTE_PTLOCKS 2180 #if ALLOC_SPLIT_PTLOCKS 2181 void __init ptlock_cache_init(void); 2182 extern bool ptlock_alloc(struct page *page); 2183 extern void ptlock_free(struct page *page); 2184 2185 static inline spinlock_t *ptlock_ptr(struct page *page) 2186 { 2187 return page->ptl; 2188 } 2189 #else /* ALLOC_SPLIT_PTLOCKS */ 2190 static inline void ptlock_cache_init(void) 2191 { 2192 } 2193 2194 static inline bool ptlock_alloc(struct page *page) 2195 { 2196 return true; 2197 } 2198 2199 static inline void ptlock_free(struct page *page) 2200 { 2201 } 2202 2203 static inline spinlock_t *ptlock_ptr(struct page *page) 2204 { 2205 return &page->ptl; 2206 } 2207 #endif /* ALLOC_SPLIT_PTLOCKS */ 2208 2209 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 2210 { 2211 return ptlock_ptr(pmd_page(*pmd)); 2212 } 2213 2214 static inline bool ptlock_init(struct page *page) 2215 { 2216 /* 2217 * prep_new_page() initialize page->private (and therefore page->ptl) 2218 * with 0. Make sure nobody took it in use in between. 2219 * 2220 * It can happen if arch try to use slab for page table allocation: 2221 * slab code uses page->slab_cache, which share storage with page->ptl. 2222 */ 2223 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); 2224 if (!ptlock_alloc(page)) 2225 return false; 2226 spin_lock_init(ptlock_ptr(page)); 2227 return true; 2228 } 2229 2230 #else /* !USE_SPLIT_PTE_PTLOCKS */ 2231 /* 2232 * We use mm->page_table_lock to guard all pagetable pages of the mm. 2233 */ 2234 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 2235 { 2236 return &mm->page_table_lock; 2237 } 2238 static inline void ptlock_cache_init(void) {} 2239 static inline bool ptlock_init(struct page *page) { return true; } 2240 static inline void ptlock_free(struct page *page) {} 2241 #endif /* USE_SPLIT_PTE_PTLOCKS */ 2242 2243 static inline void pgtable_init(void) 2244 { 2245 ptlock_cache_init(); 2246 pgtable_cache_init(); 2247 } 2248 2249 static inline bool pgtable_pte_page_ctor(struct page *page) 2250 { 2251 if (!ptlock_init(page)) 2252 return false; 2253 __SetPageTable(page); 2254 inc_lruvec_page_state(page, NR_PAGETABLE); 2255 return true; 2256 } 2257 2258 static inline void pgtable_pte_page_dtor(struct page *page) 2259 { 2260 ptlock_free(page); 2261 __ClearPageTable(page); 2262 dec_lruvec_page_state(page, NR_PAGETABLE); 2263 } 2264 2265 #define pte_offset_map_lock(mm, pmd, address, ptlp) \ 2266 ({ \ 2267 spinlock_t *__ptl = pte_lockptr(mm, pmd); \ 2268 pte_t *__pte = pte_offset_map(pmd, address); \ 2269 *(ptlp) = __ptl; \ 2270 spin_lock(__ptl); \ 2271 __pte; \ 2272 }) 2273 2274 #define pte_unmap_unlock(pte, ptl) do { \ 2275 spin_unlock(ptl); \ 2276 pte_unmap(pte); \ 2277 } while (0) 2278 2279 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) 2280 2281 #define pte_alloc_map(mm, pmd, address) \ 2282 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) 2283 2284 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ 2285 (pte_alloc(mm, pmd) ? \ 2286 NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) 2287 2288 #define pte_alloc_kernel(pmd, address) \ 2289 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? 
\ 2290 NULL: pte_offset_kernel(pmd, address)) 2291 2292 #if USE_SPLIT_PMD_PTLOCKS 2293 2294 static struct page *pmd_to_page(pmd_t *pmd) 2295 { 2296 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 2297 return virt_to_page((void *)((unsigned long) pmd & mask)); 2298 } 2299 2300 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 2301 { 2302 return ptlock_ptr(pmd_to_page(pmd)); 2303 } 2304 2305 static inline bool pmd_ptlock_init(struct page *page) 2306 { 2307 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2308 page->pmd_huge_pte = NULL; 2309 #endif 2310 return ptlock_init(page); 2311 } 2312 2313 static inline void pmd_ptlock_free(struct page *page) 2314 { 2315 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2316 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 2317 #endif 2318 ptlock_free(page); 2319 } 2320 2321 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) 2322 2323 #else 2324 2325 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 2326 { 2327 return &mm->page_table_lock; 2328 } 2329 2330 static inline bool pmd_ptlock_init(struct page *page) { return true; } 2331 static inline void pmd_ptlock_free(struct page *page) {} 2332 2333 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) 2334 2335 #endif 2336 2337 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) 2338 { 2339 spinlock_t *ptl = pmd_lockptr(mm, pmd); 2340 spin_lock(ptl); 2341 return ptl; 2342 } 2343 2344 static inline bool pgtable_pmd_page_ctor(struct page *page) 2345 { 2346 if (!pmd_ptlock_init(page)) 2347 return false; 2348 __SetPageTable(page); 2349 inc_lruvec_page_state(page, NR_PAGETABLE); 2350 return true; 2351 } 2352 2353 static inline void pgtable_pmd_page_dtor(struct page *page) 2354 { 2355 pmd_ptlock_free(page); 2356 __ClearPageTable(page); 2357 dec_lruvec_page_state(page, NR_PAGETABLE); 2358 } 2359 2360 /* 2361 * No scalability reason to split PUD locks yet, but follow the same pattern 2362 * as the PMD locks to make it easier if we decide to. The VM should not be 2363 * considered ready to switch to split PUD locks yet; there may be places 2364 * which need to be converted from page_table_lock. 2365 */ 2366 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) 2367 { 2368 return &mm->page_table_lock; 2369 } 2370 2371 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) 2372 { 2373 spinlock_t *ptl = pud_lockptr(mm, pud); 2374 2375 spin_lock(ptl); 2376 return ptl; 2377 } 2378 2379 extern void __init pagecache_init(void); 2380 extern void __init free_area_init_memoryless_node(int nid); 2381 extern void free_initmem(void); 2382 2383 /* 2384 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) 2385 * into the buddy system. The freed pages will be poisoned with pattern 2386 * "poison" if it's within range [0, UCHAR_MAX]. 2387 * Return pages freed into the buddy system. 2388 */ 2389 extern unsigned long free_reserved_area(void *start, void *end, 2390 int poison, const char *s); 2391 2392 extern void adjust_managed_page_count(struct page *page, long count); 2393 extern void mem_init_print_info(void); 2394 2395 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); 2396 2397 /* Free the reserved page into the buddy system, so it gets managed. 
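*
* A typical caller (illustrative sketch only; pfn, start_pfn and end_pfn are
* made-up names) hands back a whole range that was reserved at boot:
*
*	for (pfn = start_pfn; pfn < end_pfn; pfn++)
*		free_reserved_page(pfn_to_page(pfn));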
*/ 2398 static inline void free_reserved_page(struct page *page) 2399 { 2400 ClearPageReserved(page); 2401 init_page_count(page); 2402 __free_page(page); 2403 adjust_managed_page_count(page, 1); 2404 } 2405 #define free_highmem_page(page) free_reserved_page(page) 2406 2407 static inline void mark_page_reserved(struct page *page) 2408 { 2409 SetPageReserved(page); 2410 adjust_managed_page_count(page, -1); 2411 } 2412 2413 /* 2414 * Default method to free all the __init memory into the buddy system. 2415 * The freed pages will be poisoned with pattern "poison" if it's within 2416 * range [0, UCHAR_MAX]. 2417 * Return pages freed into the buddy system. 2418 */ 2419 static inline unsigned long free_initmem_default(int poison) 2420 { 2421 extern char __init_begin[], __init_end[]; 2422 2423 return free_reserved_area(&__init_begin, &__init_end, 2424 poison, "unused kernel"); 2425 } 2426 2427 static inline unsigned long get_num_physpages(void) 2428 { 2429 int nid; 2430 unsigned long phys_pages = 0; 2431 2432 for_each_online_node(nid) 2433 phys_pages += node_present_pages(nid); 2434 2435 return phys_pages; 2436 } 2437 2438 /* 2439 * Using memblock node mappings, an architecture may initialise its 2440 * zones, allocate the backing mem_map and account for memory holes in an 2441 * architecture independent manner. 2442 * 2443 * An architecture is expected to register range of page frames backed by 2444 * physical memory with memblock_add[_node]() before calling 2445 * free_area_init() passing in the PFN each zone ends at. At a basic 2446 * usage, an architecture is expected to do something like 2447 * 2448 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, 2449 * max_highmem_pfn}; 2450 * for_each_valid_physical_page_range() 2451 * memblock_add_node(base, size, nid) 2452 * free_area_init(max_zone_pfns); 2453 */ 2454 void free_area_init(unsigned long *max_zone_pfn); 2455 unsigned long node_map_pfn_alignment(void); 2456 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 2457 unsigned long end_pfn); 2458 extern unsigned long absent_pages_in_range(unsigned long start_pfn, 2459 unsigned long end_pfn); 2460 extern void get_pfn_range_for_nid(unsigned int nid, 2461 unsigned long *start_pfn, unsigned long *end_pfn); 2462 extern unsigned long find_min_pfn_with_active_regions(void); 2463 2464 #ifndef CONFIG_NEED_MULTIPLE_NODES 2465 static inline int early_pfn_to_nid(unsigned long pfn) 2466 { 2467 return 0; 2468 } 2469 #else 2470 /* please see mm/page_alloc.c */ 2471 extern int __meminit early_pfn_to_nid(unsigned long pfn); 2472 #endif 2473 2474 extern void set_dma_reserve(unsigned long new_dma_reserve); 2475 extern void memmap_init_range(unsigned long, int, unsigned long, 2476 unsigned long, unsigned long, enum meminit_context, 2477 struct vmem_altmap *, int migratetype); 2478 extern void memmap_init_zone(struct zone *zone); 2479 extern void setup_per_zone_wmarks(void); 2480 extern int __meminit init_per_zone_wmark_min(void); 2481 extern void mem_init(void); 2482 extern void __init mmap_init(void); 2483 extern void show_mem(unsigned int flags, nodemask_t *nodemask); 2484 extern long si_mem_available(void); 2485 extern void si_meminfo(struct sysinfo * val); 2486 extern void si_meminfo_node(struct sysinfo *val, int nid); 2487 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES 2488 extern unsigned long arch_reserved_kernel_pages(void); 2489 #endif 2490 2491 extern __printf(3, 4) 2492 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); 2493 2494 extern void 
setup_per_cpu_pageset(void); 2495 2496 /* page_alloc.c */ 2497 extern int min_free_kbytes; 2498 extern int watermark_boost_factor; 2499 extern int watermark_scale_factor; 2500 extern bool arch_has_descending_max_zone_pfns(void); 2501 2502 /* nommu.c */ 2503 extern atomic_long_t mmap_pages_allocated; 2504 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); 2505 2506 /* interval_tree.c */ 2507 void vma_interval_tree_insert(struct vm_area_struct *node, 2508 struct rb_root_cached *root); 2509 void vma_interval_tree_insert_after(struct vm_area_struct *node, 2510 struct vm_area_struct *prev, 2511 struct rb_root_cached *root); 2512 void vma_interval_tree_remove(struct vm_area_struct *node, 2513 struct rb_root_cached *root); 2514 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, 2515 unsigned long start, unsigned long last); 2516 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, 2517 unsigned long start, unsigned long last); 2518 2519 #define vma_interval_tree_foreach(vma, root, start, last) \ 2520 for (vma = vma_interval_tree_iter_first(root, start, last); \ 2521 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 2522 2523 void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 2524 struct rb_root_cached *root); 2525 void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 2526 struct rb_root_cached *root); 2527 struct anon_vma_chain * 2528 anon_vma_interval_tree_iter_first(struct rb_root_cached *root, 2529 unsigned long start, unsigned long last); 2530 struct anon_vma_chain *anon_vma_interval_tree_iter_next( 2531 struct anon_vma_chain *node, unsigned long start, unsigned long last); 2532 #ifdef CONFIG_DEBUG_VM_RB 2533 void anon_vma_interval_tree_verify(struct anon_vma_chain *node); 2534 #endif 2535 2536 #define anon_vma_interval_tree_foreach(avc, root, start, last) \ 2537 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ 2538 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) 2539 2540 /* mmap.c */ 2541 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); 2542 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, 2543 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, 2544 struct vm_area_struct *expand); 2545 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, 2546 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) 2547 { 2548 return __vma_adjust(vma, start, end, pgoff, insert, NULL); 2549 } 2550 extern struct vm_area_struct *vma_merge(struct mm_struct *, 2551 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 2552 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 2553 struct mempolicy *, struct vm_userfaultfd_ctx); 2554 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 2555 extern int __split_vma(struct mm_struct *, struct vm_area_struct *, 2556 unsigned long addr, int new_below); 2557 extern int split_vma(struct mm_struct *, struct vm_area_struct *, 2558 unsigned long addr, int new_below); 2559 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); 2560 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, 2561 struct rb_node **, struct rb_node *); 2562 extern void unlink_file_vma(struct vm_area_struct *); 2563 extern struct vm_area_struct *copy_vma(struct vm_area_struct **, 2564 unsigned long addr, unsigned long len, pgoff_t pgoff, 2565 bool *need_rmap_locks); 2566 
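/*
 * Illustrative sketch (hypothetical caller, not part of this header): the
 * vma_interval_tree_foreach() iterator declared above walks every VMA that
 * maps any page index in [start, last] of a file, typically with the
 * mapping's i_mmap_rwsem held:
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, start, last)
 *		handle_one_vma(vma);
 *
 * handle_one_vma() stands in for whatever per-VMA work the caller does.
 */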
extern void exit_mmap(struct mm_struct *); 2567 2568 static inline int check_data_rlimit(unsigned long rlim, 2569 unsigned long new, 2570 unsigned long start, 2571 unsigned long end_data, 2572 unsigned long start_data) 2573 { 2574 if (rlim < RLIM_INFINITY) { 2575 if (((new - start) + (end_data - start_data)) > rlim) 2576 return -ENOSPC; 2577 } 2578 2579 return 0; 2580 } 2581 2582 extern int mm_take_all_locks(struct mm_struct *mm); 2583 extern void mm_drop_all_locks(struct mm_struct *mm); 2584 2585 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 2586 extern struct file *get_mm_exe_file(struct mm_struct *mm); 2587 extern struct file *get_task_exe_file(struct task_struct *task); 2588 2589 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); 2590 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); 2591 2592 extern bool vma_is_special_mapping(const struct vm_area_struct *vma, 2593 const struct vm_special_mapping *sm); 2594 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, 2595 unsigned long addr, unsigned long len, 2596 unsigned long flags, 2597 const struct vm_special_mapping *spec); 2598 /* This is an obsolete alternative to _install_special_mapping. */ 2599 extern int install_special_mapping(struct mm_struct *mm, 2600 unsigned long addr, unsigned long len, 2601 unsigned long flags, struct page **pages); 2602 2603 unsigned long randomize_stack_top(unsigned long stack_top); 2604 2605 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 2606 2607 extern unsigned long mmap_region(struct file *file, unsigned long addr, 2608 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2609 struct list_head *uf); 2610 extern unsigned long do_mmap(struct file *file, unsigned long addr, 2611 unsigned long len, unsigned long prot, unsigned long flags, 2612 unsigned long pgoff, unsigned long *populate, struct list_head *uf); 2613 extern int __do_munmap(struct mm_struct *, unsigned long, size_t, 2614 struct list_head *uf, bool downgrade); 2615 extern int do_munmap(struct mm_struct *, unsigned long, size_t, 2616 struct list_head *uf); 2617 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); 2618 2619 #ifdef CONFIG_MMU 2620 extern int __mm_populate(unsigned long addr, unsigned long len, 2621 int ignore_errors); 2622 static inline void mm_populate(unsigned long addr, unsigned long len) 2623 { 2624 /* Ignore errors */ 2625 (void) __mm_populate(addr, len, 1); 2626 } 2627 #else 2628 static inline void mm_populate(unsigned long addr, unsigned long len) {} 2629 #endif 2630 2631 /* These take the mm semaphore themselves */ 2632 extern int __must_check vm_brk(unsigned long, unsigned long); 2633 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); 2634 extern int vm_munmap(unsigned long, size_t); 2635 extern unsigned long __must_check vm_mmap(struct file *, unsigned long, 2636 unsigned long, unsigned long, 2637 unsigned long, unsigned long); 2638 2639 struct vm_unmapped_area_info { 2640 #define VM_UNMAPPED_AREA_TOPDOWN 1 2641 unsigned long flags; 2642 unsigned long length; 2643 unsigned long low_limit; 2644 unsigned long high_limit; 2645 unsigned long align_mask; 2646 unsigned long align_offset; 2647 }; 2648 2649 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); 2650 2651 /* truncate.c */ 2652 extern void truncate_inode_pages(struct address_space *, 
loff_t); 2653 extern void truncate_inode_pages_range(struct address_space *, 2654 loff_t lstart, loff_t lend); 2655 extern void truncate_inode_pages_final(struct address_space *); 2656 2657 /* generic vm_area_ops exported for stackable file systems */ 2658 extern vm_fault_t filemap_fault(struct vm_fault *vmf); 2659 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, 2660 pgoff_t start_pgoff, pgoff_t end_pgoff); 2661 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); 2662 2663 /* mm/page-writeback.c */ 2664 int __must_check write_one_page(struct page *page); 2665 void task_dirty_inc(struct task_struct *tsk); 2666 2667 extern unsigned long stack_guard_gap; 2668 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 2669 extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 2670 2671 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ 2672 extern int expand_downwards(struct vm_area_struct *vma, 2673 unsigned long address); 2674 #if VM_GROWSUP 2675 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 2676 #else 2677 #define expand_upwards(vma, address) (0) 2678 #endif 2679 2680 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 2681 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 2682 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, 2683 struct vm_area_struct **pprev); 2684 2685 /* Look up the first VMA which intersects the interval start_addr..end_addr-1, 2686 NULL if none. Assume start_addr < end_addr. */ 2687 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) 2688 { 2689 struct vm_area_struct * vma = find_vma(mm,start_addr); 2690 2691 if (vma && end_addr <= vma->vm_start) 2692 vma = NULL; 2693 return vma; 2694 } 2695 2696 static inline unsigned long vm_start_gap(struct vm_area_struct *vma) 2697 { 2698 unsigned long vm_start = vma->vm_start; 2699 2700 if (vma->vm_flags & VM_GROWSDOWN) { 2701 vm_start -= stack_guard_gap; 2702 if (vm_start > vma->vm_start) 2703 vm_start = 0; 2704 } 2705 return vm_start; 2706 } 2707 2708 static inline unsigned long vm_end_gap(struct vm_area_struct *vma) 2709 { 2710 unsigned long vm_end = vma->vm_end; 2711 2712 if (vma->vm_flags & VM_GROWSUP) { 2713 vm_end += stack_guard_gap; 2714 if (vm_end < vma->vm_end) 2715 vm_end = -PAGE_SIZE; 2716 } 2717 return vm_end; 2718 } 2719 2720 static inline unsigned long vma_pages(struct vm_area_struct *vma) 2721 { 2722 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 2723 } 2724 2725 /* Look up the first VMA which exactly match the interval vm_start ... 
vm_end */ 2726 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, 2727 unsigned long vm_start, unsigned long vm_end) 2728 { 2729 struct vm_area_struct *vma = find_vma(mm, vm_start); 2730 2731 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) 2732 vma = NULL; 2733 2734 return vma; 2735 } 2736 2737 static inline bool range_in_vma(struct vm_area_struct *vma, 2738 unsigned long start, unsigned long end) 2739 { 2740 return (vma && vma->vm_start <= start && end <= vma->vm_end); 2741 } 2742 2743 #ifdef CONFIG_MMU 2744 pgprot_t vm_get_page_prot(unsigned long vm_flags); 2745 void vma_set_page_prot(struct vm_area_struct *vma); 2746 #else 2747 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 2748 { 2749 return __pgprot(0); 2750 } 2751 static inline void vma_set_page_prot(struct vm_area_struct *vma) 2752 { 2753 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 2754 } 2755 #endif 2756 2757 void vma_set_file(struct vm_area_struct *vma, struct file *file); 2758 2759 #ifdef CONFIG_NUMA_BALANCING 2760 unsigned long change_prot_numa(struct vm_area_struct *vma, 2761 unsigned long start, unsigned long end); 2762 #endif 2763 2764 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 2765 int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 2766 unsigned long pfn, unsigned long size, pgprot_t); 2767 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 2768 unsigned long pfn, unsigned long size, pgprot_t prot); 2769 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2770 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 2771 struct page **pages, unsigned long *num); 2772 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2773 unsigned long num); 2774 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 2775 unsigned long num); 2776 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2777 unsigned long pfn); 2778 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2779 unsigned long pfn, pgprot_t pgprot); 2780 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2781 pfn_t pfn); 2782 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, 2783 pfn_t pfn, pgprot_t pgprot); 2784 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 2785 unsigned long addr, pfn_t pfn); 2786 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 2787 2788 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, 2789 unsigned long addr, struct page *page) 2790 { 2791 int err = vm_insert_page(vma, addr, page); 2792 2793 if (err == -ENOMEM) 2794 return VM_FAULT_OOM; 2795 if (err < 0 && err != -EBUSY) 2796 return VM_FAULT_SIGBUS; 2797 2798 return VM_FAULT_NOPAGE; 2799 } 2800 2801 #ifndef io_remap_pfn_range 2802 static inline int io_remap_pfn_range(struct vm_area_struct *vma, 2803 unsigned long addr, unsigned long pfn, 2804 unsigned long size, pgprot_t prot) 2805 { 2806 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); 2807 } 2808 #endif 2809 2810 static inline vm_fault_t vmf_error(int err) 2811 { 2812 if (err == -ENOMEM) 2813 return VM_FAULT_OOM; 2814 return VM_FAULT_SIGBUS; 2815 } 2816 2817 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 2818 unsigned int foll_flags); 2819 2820 #define FOLL_WRITE 0x01 /* check pte is writable */ 2821 #define 
FOLL_TOUCH 0x02 /* mark page accessed */ 2822 #define FOLL_GET 0x04 /* do get_page on page */ 2823 #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 2824 #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 2825 #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO 2826 * and return without waiting upon it */ 2827 #define FOLL_POPULATE 0x40 /* fault in page */ 2828 #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 2829 #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 2830 #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2831 #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2832 #define FOLL_MLOCK 0x1000 /* lock present pages */ 2833 #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2834 #define FOLL_COW 0x4000 /* internal GUP flag */ 2835 #define FOLL_ANON 0x8000 /* don't do file mappings */ 2836 #define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */ 2837 #define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ 2838 #define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */ 2839 #define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */ 2840 2841 /* 2842 * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each 2843 * other. Here is what they mean, and how to use them: 2844 * 2845 * FOLL_LONGTERM indicates that the page will be held for an indefinite time 2846 * period _often_ under userspace control. This is in contrast to 2847 * iov_iter_get_pages(), whose usages are transient. 2848 * 2849 * FIXME: For pages which are part of a filesystem, mappings are subject to the 2850 * lifetime enforced by the filesystem and we need guarantees that longterm 2851 * users like RDMA and V4L2 only establish mappings which coordinate usage with 2852 * the filesystem. Ideas for this coordination include revoking the longterm 2853 * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was 2854 * added after the problem with filesystems was found FS DAX VMAs are 2855 * specifically failed. Filesystem pages are still subject to bugs and use of 2856 * FOLL_LONGTERM should be avoided on those pages. 2857 * 2858 * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call. 2859 * Currently only get_user_pages() and get_user_pages_fast() support this flag 2860 * and calls to get_user_pages_[un]locked are specifically not allowed. This 2861 * is due to an incompatibility with the FS DAX check and 2862 * FAULT_FLAG_ALLOW_RETRY. 2863 * 2864 * In the CMA case: long term pins in a CMA region would unnecessarily fragment 2865 * that region. And so, CMA attempts to migrate the page before pinning, when 2866 * FOLL_LONGTERM is specified. 2867 * 2868 * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount, 2869 * but an additional pin counting system) will be invoked. This is intended for 2870 * anything that gets a page reference and then touches page data (for example, 2871 * Direct IO). This lets the filesystem know that some non-file-system entity is 2872 * potentially changing the pages' data. In contrast to FOLL_GET (whose pages 2873 * are released via put_page()), FOLL_PIN pages must be released, ultimately, by 2874 * a call to unpin_user_page(). 2875 * 2876 * FOLL_PIN is similar to FOLL_GET: both of these pin pages. 
They use different 2877 * and separate refcounting mechanisms, however, and that means that each has 2878 * its own acquire and release mechanisms: 2879 * 2880 * FOLL_GET: get_user_pages*() to acquire, and put_page() to release. 2881 * 2882 * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release. 2883 * 2884 * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call. 2885 * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based 2886 * calls applied to them, and that's perfectly OK. This is a constraint on the 2887 * callers, not on the pages.) 2888 * 2889 * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never 2890 * directly by the caller. That's in order to help avoid mismatches when 2891 * releasing pages: get_user_pages*() pages must be released via put_page(), 2892 * while pin_user_pages*() pages must be released via unpin_user_page(). 2893 * 2894 * Please see Documentation/core-api/pin_user_pages.rst for more information. 2895 */ 2896 2897 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) 2898 { 2899 if (vm_fault & VM_FAULT_OOM) 2900 return -ENOMEM; 2901 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 2902 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; 2903 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 2904 return -EFAULT; 2905 return 0; 2906 } 2907 2908 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); 2909 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 2910 unsigned long size, pte_fn_t fn, void *data); 2911 extern int apply_to_existing_page_range(struct mm_struct *mm, 2912 unsigned long address, unsigned long size, 2913 pte_fn_t fn, void *data); 2914 2915 extern void init_mem_debugging_and_hardening(void); 2916 #ifdef CONFIG_PAGE_POISONING 2917 extern void __kernel_poison_pages(struct page *page, int numpages); 2918 extern void __kernel_unpoison_pages(struct page *page, int numpages); 2919 extern bool _page_poisoning_enabled_early; 2920 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled); 2921 static inline bool page_poisoning_enabled(void) 2922 { 2923 return _page_poisoning_enabled_early; 2924 } 2925 /* 2926 * For use in fast paths after init_mem_debugging() has run, or when a 2927 * false negative result is not harmful when called too early. 
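* kernel_poison_pages() and kernel_unpoison_pages() below are examples of
* such fast-path users: they test the static key first and only then call
* __kernel_poison_pages() or __kernel_unpoison_pages().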
2928 */
2929 static inline bool page_poisoning_enabled_static(void)
2930 {
2931 return static_branch_unlikely(&_page_poisoning_enabled);
2932 }
2933 static inline void kernel_poison_pages(struct page *page, int numpages)
2934 {
2935 if (page_poisoning_enabled_static())
2936 __kernel_poison_pages(page, numpages);
2937 }
2938 static inline void kernel_unpoison_pages(struct page *page, int numpages)
2939 {
2940 if (page_poisoning_enabled_static())
2941 __kernel_unpoison_pages(page, numpages);
2942 }
2943 #else
2944 static inline bool page_poisoning_enabled(void) { return false; }
2945 static inline bool page_poisoning_enabled_static(void) { return false; }
2946 static inline void __kernel_poison_pages(struct page *page, int numpages) { }
2947 static inline void kernel_poison_pages(struct page *page, int numpages) { }
2948 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
2949 #endif
2950
2951 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2952 static inline bool want_init_on_alloc(gfp_t flags)
2953 {
2954 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
2955 &init_on_alloc))
2956 return true;
2957 return flags & __GFP_ZERO;
2958 }
2959
2960 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2961 static inline bool want_init_on_free(void)
2962 {
2963 return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
2964 &init_on_free);
2965 }
2966
2967 extern bool _debug_pagealloc_enabled_early;
2968 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
2969
2970 static inline bool debug_pagealloc_enabled(void)
2971 {
2972 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
2973 _debug_pagealloc_enabled_early;
2974 }
2975
2976 /*
2977 * For use in fast paths after init_debug_pagealloc() has run, or when a
2978 * false negative result is not harmful when called too early.
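* debug_pagealloc_map_pages() and debug_pagealloc_unmap_pages() below are
* the typical fast-path users of this helper.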
2979 */ 2980 static inline bool debug_pagealloc_enabled_static(void) 2981 { 2982 if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) 2983 return false; 2984 2985 return static_branch_unlikely(&_debug_pagealloc_enabled); 2986 } 2987 2988 #ifdef CONFIG_DEBUG_PAGEALLOC 2989 /* 2990 * To support DEBUG_PAGEALLOC architecture must ensure that 2991 * __kernel_map_pages() never fails 2992 */ 2993 extern void __kernel_map_pages(struct page *page, int numpages, int enable); 2994 2995 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) 2996 { 2997 if (debug_pagealloc_enabled_static()) 2998 __kernel_map_pages(page, numpages, 1); 2999 } 3000 3001 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) 3002 { 3003 if (debug_pagealloc_enabled_static()) 3004 __kernel_map_pages(page, numpages, 0); 3005 } 3006 #else /* CONFIG_DEBUG_PAGEALLOC */ 3007 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} 3008 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} 3009 #endif /* CONFIG_DEBUG_PAGEALLOC */ 3010 3011 #ifdef __HAVE_ARCH_GATE_AREA 3012 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 3013 extern int in_gate_area_no_mm(unsigned long addr); 3014 extern int in_gate_area(struct mm_struct *mm, unsigned long addr); 3015 #else 3016 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 3017 { 3018 return NULL; 3019 } 3020 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } 3021 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) 3022 { 3023 return 0; 3024 } 3025 #endif /* __HAVE_ARCH_GATE_AREA */ 3026 3027 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); 3028 3029 #ifdef CONFIG_SYSCTL 3030 extern int sysctl_drop_caches; 3031 int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, 3032 loff_t *); 3033 #endif 3034 3035 void drop_slab(void); 3036 void drop_slab_node(int nid); 3037 3038 #ifndef CONFIG_MMU 3039 #define randomize_va_space 0 3040 #else 3041 extern int randomize_va_space; 3042 #endif 3043 3044 const char * arch_vma_name(struct vm_area_struct *vma); 3045 #ifdef CONFIG_MMU 3046 void print_vma_addr(char *prefix, unsigned long rip); 3047 #else 3048 static inline void print_vma_addr(char *prefix, unsigned long rip) 3049 { 3050 } 3051 #endif 3052 3053 void *sparse_buffer_alloc(unsigned long size); 3054 struct page * __populate_section_memmap(unsigned long pfn, 3055 unsigned long nr_pages, int nid, struct vmem_altmap *altmap); 3056 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 3057 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); 3058 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); 3059 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 3060 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, 3061 struct vmem_altmap *altmap); 3062 void *vmemmap_alloc_block(unsigned long size, int node); 3063 struct vmem_altmap; 3064 void *vmemmap_alloc_block_buf(unsigned long size, int node, 3065 struct vmem_altmap *altmap); 3066 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 3067 int vmemmap_populate_basepages(unsigned long start, unsigned long end, 3068 int node, struct vmem_altmap *altmap); 3069 int vmemmap_populate(unsigned long start, unsigned long end, int node, 3070 struct vmem_altmap *altmap); 3071 void vmemmap_populate_print_last(void); 3072 #ifdef CONFIG_MEMORY_HOTPLUG 3073 void 
vmemmap_free(unsigned long start, unsigned long end, 3074 struct vmem_altmap *altmap); 3075 #endif 3076 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, 3077 unsigned long nr_pages); 3078 3079 enum mf_flags { 3080 MF_COUNT_INCREASED = 1 << 0, 3081 MF_ACTION_REQUIRED = 1 << 1, 3082 MF_MUST_KILL = 1 << 2, 3083 MF_SOFT_OFFLINE = 1 << 3, 3084 }; 3085 extern int memory_failure(unsigned long pfn, int flags); 3086 extern void memory_failure_queue(unsigned long pfn, int flags); 3087 extern void memory_failure_queue_kick(int cpu); 3088 extern int unpoison_memory(unsigned long pfn); 3089 extern int sysctl_memory_failure_early_kill; 3090 extern int sysctl_memory_failure_recovery; 3091 extern void shake_page(struct page *p, int access); 3092 extern atomic_long_t num_poisoned_pages __read_mostly; 3093 extern int soft_offline_page(unsigned long pfn, int flags); 3094 3095 3096 /* 3097 * Error handlers for various types of pages. 3098 */ 3099 enum mf_result { 3100 MF_IGNORED, /* Error: cannot be handled */ 3101 MF_FAILED, /* Error: handling failed */ 3102 MF_DELAYED, /* Will be handled later */ 3103 MF_RECOVERED, /* Successfully recovered */ 3104 }; 3105 3106 enum mf_action_page_type { 3107 MF_MSG_KERNEL, 3108 MF_MSG_KERNEL_HIGH_ORDER, 3109 MF_MSG_SLAB, 3110 MF_MSG_DIFFERENT_COMPOUND, 3111 MF_MSG_POISONED_HUGE, 3112 MF_MSG_HUGE, 3113 MF_MSG_FREE_HUGE, 3114 MF_MSG_NON_PMD_HUGE, 3115 MF_MSG_UNMAP_FAILED, 3116 MF_MSG_DIRTY_SWAPCACHE, 3117 MF_MSG_CLEAN_SWAPCACHE, 3118 MF_MSG_DIRTY_MLOCKED_LRU, 3119 MF_MSG_CLEAN_MLOCKED_LRU, 3120 MF_MSG_DIRTY_UNEVICTABLE_LRU, 3121 MF_MSG_CLEAN_UNEVICTABLE_LRU, 3122 MF_MSG_DIRTY_LRU, 3123 MF_MSG_CLEAN_LRU, 3124 MF_MSG_TRUNCATED_LRU, 3125 MF_MSG_BUDDY, 3126 MF_MSG_BUDDY_2ND, 3127 MF_MSG_DAX, 3128 MF_MSG_UNSPLIT_THP, 3129 MF_MSG_UNKNOWN, 3130 }; 3131 3132 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 3133 extern void clear_huge_page(struct page *page, 3134 unsigned long addr_hint, 3135 unsigned int pages_per_huge_page); 3136 extern void copy_user_huge_page(struct page *dst, struct page *src, 3137 unsigned long addr_hint, 3138 struct vm_area_struct *vma, 3139 unsigned int pages_per_huge_page); 3140 extern long copy_huge_page_from_user(struct page *dst_page, 3141 const void __user *usr_src, 3142 unsigned int pages_per_huge_page, 3143 bool allow_pagefault); 3144 3145 /** 3146 * vma_is_special_huge - Are transhuge page-table entries considered special? 3147 * @vma: Pointer to the struct vm_area_struct to consider 3148 * 3149 * Whether transhuge page-table entries are considered "special" following 3150 * the definition in vm_normal_page(). 3151 * 3152 * Return: true if transhuge page-table entries should be considered special, 3153 * false otherwise. 
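*
* Illustrative sketch (hypothetical snippet, not a caller in this header):
* a huge-page zap or split path might use it to decide whether there is a
* backing struct page to update, e.g.
*
*	if (!vma_is_special_huge(vma))
*		page = pmd_page(orig_pmd);
*
* where orig_pmd is the caller's saved pmd value.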
3154 */ 3155 static inline bool vma_is_special_huge(const struct vm_area_struct *vma) 3156 { 3157 return vma_is_dax(vma) || (vma->vm_file && 3158 (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); 3159 } 3160 3161 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 3162 3163 #ifdef CONFIG_DEBUG_PAGEALLOC 3164 extern unsigned int _debug_guardpage_minorder; 3165 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); 3166 3167 static inline unsigned int debug_guardpage_minorder(void) 3168 { 3169 return _debug_guardpage_minorder; 3170 } 3171 3172 static inline bool debug_guardpage_enabled(void) 3173 { 3174 return static_branch_unlikely(&_debug_guardpage_enabled); 3175 } 3176 3177 static inline bool page_is_guard(struct page *page) 3178 { 3179 if (!debug_guardpage_enabled()) 3180 return false; 3181 3182 return PageGuard(page); 3183 } 3184 #else 3185 static inline unsigned int debug_guardpage_minorder(void) { return 0; } 3186 static inline bool debug_guardpage_enabled(void) { return false; } 3187 static inline bool page_is_guard(struct page *page) { return false; } 3188 #endif /* CONFIG_DEBUG_PAGEALLOC */ 3189 3190 #if MAX_NUMNODES > 1 3191 void __init setup_nr_node_ids(void); 3192 #else 3193 static inline void setup_nr_node_ids(void) {} 3194 #endif 3195 3196 extern int memcmp_pages(struct page *page1, struct page *page2); 3197 3198 static inline int pages_identical(struct page *page1, struct page *page2) 3199 { 3200 return !memcmp_pages(page1, page2); 3201 } 3202 3203 #ifdef CONFIG_MAPPING_DIRTY_HELPERS 3204 unsigned long clean_record_shared_mapping_range(struct address_space *mapping, 3205 pgoff_t first_index, pgoff_t nr, 3206 pgoff_t bitmap_pgoff, 3207 unsigned long *bitmap, 3208 pgoff_t *start, 3209 pgoff_t *end); 3210 3211 unsigned long wp_shared_mapping_range(struct address_space *mapping, 3212 pgoff_t first_index, pgoff_t nr); 3213 #endif 3214 3215 extern int sysctl_nr_trim_pages; 3216 3217 #ifdef CONFIG_PRINTK 3218 void mem_dump_obj(void *object); 3219 #else 3220 static inline void mem_dump_obj(void *object) {} 3221 #endif 3222 3223 /** 3224 * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it 3225 * @seals: the seals to check 3226 * @vma: the vma to operate on 3227 * 3228 * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on 3229 * the vma flags. Return 0 if check pass, or <0 for errors. 3230 */ 3231 static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) 3232 { 3233 if (seals & F_SEAL_FUTURE_WRITE) { 3234 /* 3235 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when 3236 * "future write" seal active. 3237 */ 3238 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) 3239 return -EPERM; 3240 3241 /* 3242 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as 3243 * MAP_SHARED and read-only, take care to not allow mprotect to 3244 * revert protections on such mappings. Do this only for shared 3245 * mappings. For private mappings, don't need to mask 3246 * VM_MAYWRITE as we still want them to be COW-writable. 3247 */ 3248 if (vma->vm_flags & VM_SHARED) 3249 vma->vm_flags &= ~(VM_MAYWRITE); 3250 } 3251 3252 return 0; 3253 } 3254 3255 #endif /* __KERNEL__ */ 3256 #endif /* _LINUX_MM_H */ 3257