#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code from establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Default maximum number of active map areas; this limits the number of VMAs
 * per mm struct. Users can override this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, one section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short.
 * This means the number of sections must stay below 65535 when the coredump is
 * taken. Because the kernel adds a few informative sections to the program
 * image while generating the coredump, we need some margin. The number of
 * extra sections is currently 1-3, depending on the architecture, so we use 5
 * as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is no longer a hard limit, although some userspace tools can be surprised by
 * that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and pointers
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_fault *vmf);
	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
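/*
 * Illustrative sketch (not part of the kernel API itself): a driver that maps
 * its own pages typically supplies only a few of the operations above.  The
 * names my_dev_fault and my_dev_pages below are hypothetical.
 *
 *	static int my_dev_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_dev_pages[vmf->pgoff];	// driver-owned page
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);			// reference handed to the MM layer
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_dev_vm_ops = {
 *		.fault = my_dev_fault,
 *	};
 *
 * The driver's mmap() file operation would then set vma->vm_ops = &my_dev_vm_ops.
 */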

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 * This can be called when the MMU is off, so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}
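/*
 * Illustrative sketch (not from this header): the usual pattern for taking a
 * temporary reference on a page that might be freed concurrently is
 *
 *	if (!get_page_unless_zero(page))
 *		return;			// page was already on its way out
 *	... inspect or use the page ...
 *	put_page(page);			// declared further below in this file
 *
 * put_page() drops the reference via put_page_testzero() and frees the page
 * once the count reaches zero.
 */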
/*
 * The atomic page->_mapcount starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif
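/*
 * Illustrative sketch (an assumption about a typical caller, not a definition
 * from this file): a fault handler installing a new anonymous page builds its
 * PTE roughly like
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *
 *	if (vmf->flags & FAULT_FLAG_WRITE)
 *		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *
 * so write permission is only granted when the VMA itself is VM_WRITE.
 */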
/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))
		__put_page(page);
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.
 * Regular pagecache pages use ->index, whereas swapcache pages use
 * swp_offset(->private).
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met, implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, did not return a page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,			"OOM" }, \
	{ VM_FAULT_SIGBUS,		"SIGBUS" }, \
	{ VM_FAULT_MAJOR,		"MAJOR" }, \
	{ VM_FAULT_WRITE,		"WRITE" }, \
	{ VM_FAULT_HWPOISON,		"HWPOISON" }, \
	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" }, \
	{ VM_FAULT_SIGSEGV,		"SIGSEGV" }, \
	{ VM_FAULT_NOPAGE,		"NOPAGE" }, \
	{ VM_FAULT_LOCKED,		"LOCKED" }, \
	{ VM_FAULT_RETRY,		"RETRY" }, \
	{ VM_FAULT_FALLBACK,		"FALLBACK" }, \
	{ VM_FAULT_DONE_COW,		"DONE_COW" }

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
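/*
 * Illustrative sketch (not part of this header): a caller of handle_mm_fault(),
 * which is declared further below, typically interprets the VM_FAULT_xxx
 * bitmask along these lines.
 *
 *	int ret = handle_mm_fault(vma, address, flags);
 *
 *	if (ret & VM_FAULT_ERROR) {
 *		if (ret & VM_FAULT_OOM)
 *			... out-of-memory handling ...
 *		else if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 *			... deliver the corresponding signal ...
 *	} else if (ret & VM_FAULT_MAJOR) {
 *		... account a major fault ...
 *	}
 */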
/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 *	       this handler should only handle pud_trans_huge() puds.
 *	       the pmd_entry or pte_entry callbacks will be used for
 *	       regular PUDs.
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. Returning 0 means
 *             "do the page table walk over the current vma", a negative
 *             value means "abort the current page table walk right now",
 *             and 1 means "skip the current vma".
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
#else
static inline int handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif
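/*
 * Illustrative sketch (not part of this header): a minimal user of
 * walk_page_range() fills in struct mm_walk with just the callbacks it needs.
 * The callback name count_present_pte below is hypothetical.
 *
 *	static int count_present_pte(pte_t *pte, unsigned long addr,
 *				     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;	// a non-zero return aborts the walk
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_present_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *
 *	down_read(&mm->mmap_sem);	// the walk expects mmap_sem held
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */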
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
		unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;		/* Number of frames stored in ptrs array */
	bool got_ref;			/* Did we pin pages by getting page ref? */
	bool is_pfns;			/* Does array contain pages or pfns? */
	void *ptrs[0];			/* Array of pinned pfns / pages. Use
					 * frame_vector_pages() or frame_vector_pfns()
					 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

int vma_is_stack_for_current(struct vm_area_struct *vma);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative, but a negative value is never what callers expect,
	 * so clamp it to zero.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_nr_pmds_init(struct mm_struct *mm) {}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->nr_pmds, 0);
}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);
}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0.  Make sure nobody took it into use in between.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocations: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
1691 */ 1692 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); 1693 if (!ptlock_alloc(page)) 1694 return false; 1695 spin_lock_init(ptlock_ptr(page)); 1696 return true; 1697 } 1698 1699 /* Reset page->mapping so free_pages_check won't complain. */ 1700 static inline void pte_lock_deinit(struct page *page) 1701 { 1702 page->mapping = NULL; 1703 ptlock_free(page); 1704 } 1705 1706 #else /* !USE_SPLIT_PTE_PTLOCKS */ 1707 /* 1708 * We use mm->page_table_lock to guard all pagetable pages of the mm. 1709 */ 1710 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 1711 { 1712 return &mm->page_table_lock; 1713 } 1714 static inline void ptlock_cache_init(void) {} 1715 static inline bool ptlock_init(struct page *page) { return true; } 1716 static inline void pte_lock_deinit(struct page *page) {} 1717 #endif /* USE_SPLIT_PTE_PTLOCKS */ 1718 1719 static inline void pgtable_init(void) 1720 { 1721 ptlock_cache_init(); 1722 pgtable_cache_init(); 1723 } 1724 1725 static inline bool pgtable_page_ctor(struct page *page) 1726 { 1727 if (!ptlock_init(page)) 1728 return false; 1729 inc_zone_page_state(page, NR_PAGETABLE); 1730 return true; 1731 } 1732 1733 static inline void pgtable_page_dtor(struct page *page) 1734 { 1735 pte_lock_deinit(page); 1736 dec_zone_page_state(page, NR_PAGETABLE); 1737 } 1738 1739 #define pte_offset_map_lock(mm, pmd, address, ptlp) \ 1740 ({ \ 1741 spinlock_t *__ptl = pte_lockptr(mm, pmd); \ 1742 pte_t *__pte = pte_offset_map(pmd, address); \ 1743 *(ptlp) = __ptl; \ 1744 spin_lock(__ptl); \ 1745 __pte; \ 1746 }) 1747 1748 #define pte_unmap_unlock(pte, ptl) do { \ 1749 spin_unlock(ptl); \ 1750 pte_unmap(pte); \ 1751 } while (0) 1752 1753 #define pte_alloc(mm, pmd, address) \ 1754 (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) 1755 1756 #define pte_alloc_map(mm, pmd, address) \ 1757 (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) 1758 1759 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ 1760 (pte_alloc(mm, pmd, address) ? \ 1761 NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) 1762 1763 #define pte_alloc_kernel(pmd, address) \ 1764 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? 
\ 1765 NULL: pte_offset_kernel(pmd, address)) 1766 1767 #if USE_SPLIT_PMD_PTLOCKS 1768 1769 static struct page *pmd_to_page(pmd_t *pmd) 1770 { 1771 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 1772 return virt_to_page((void *)((unsigned long) pmd & mask)); 1773 } 1774 1775 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1776 { 1777 return ptlock_ptr(pmd_to_page(pmd)); 1778 } 1779 1780 static inline bool pgtable_pmd_page_ctor(struct page *page) 1781 { 1782 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1783 page->pmd_huge_pte = NULL; 1784 #endif 1785 return ptlock_init(page); 1786 } 1787 1788 static inline void pgtable_pmd_page_dtor(struct page *page) 1789 { 1790 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1791 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 1792 #endif 1793 ptlock_free(page); 1794 } 1795 1796 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) 1797 1798 #else 1799 1800 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1801 { 1802 return &mm->page_table_lock; 1803 } 1804 1805 static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } 1806 static inline void pgtable_pmd_page_dtor(struct page *page) {} 1807 1808 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) 1809 1810 #endif 1811 1812 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) 1813 { 1814 spinlock_t *ptl = pmd_lockptr(mm, pmd); 1815 spin_lock(ptl); 1816 return ptl; 1817 } 1818 1819 /* 1820 * No scalability reason to split PUD locks yet, but follow the same pattern 1821 * as the PMD locks to make it easier if we decide to. The VM should not be 1822 * considered ready to switch to split PUD locks yet; there may be places 1823 * which need to be converted from page_table_lock. 1824 */ 1825 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) 1826 { 1827 return &mm->page_table_lock; 1828 } 1829 1830 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) 1831 { 1832 spinlock_t *ptl = pud_lockptr(mm, pud); 1833 1834 spin_lock(ptl); 1835 return ptl; 1836 } 1837 1838 extern void __init pagecache_init(void); 1839 extern void free_area_init(unsigned long * zones_size); 1840 extern void free_area_init_node(int nid, unsigned long * zones_size, 1841 unsigned long zone_start_pfn, unsigned long *zholes_size); 1842 extern void free_initmem(void); 1843 1844 /* 1845 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) 1846 * into the buddy system. The freed pages will be poisoned with pattern 1847 * "poison" if it's within range [0, UCHAR_MAX]. 1848 * Return pages freed into the buddy system. 1849 */ 1850 extern unsigned long free_reserved_area(void *start, void *end, 1851 int poison, char *s); 1852 1853 #ifdef CONFIG_HIGHMEM 1854 /* 1855 * Free a highmem page into the buddy system, adjusting totalhigh_pages 1856 * and totalram_pages. 1857 */ 1858 extern void free_highmem_page(struct page *page); 1859 #endif 1860 1861 extern void adjust_managed_page_count(struct page *page, long count); 1862 extern void mem_init_print_info(const char *str); 1863 1864 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); 1865 1866 /* Free the reserved page into the buddy system, so it gets managed. 
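 *
 * As an illustrative sketch only (my_res_start/my_res_end are hypothetical
 * placeholders, not symbols defined anywhere), releasing a page-aligned
 * reserved physical range back to the page allocator could look like:
 *
 *	unsigned long pfn;
 *
 *	for (pfn = PFN_UP(my_res_start); pfn < PFN_DOWN(my_res_end); pfn++)
 *		free_reserved_page(pfn_to_page(pfn));
 *
 * free_reserved_area() above provides the same service for a virtually
 * mapped range, with optional poisoning of the freed pages.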
*/ 1867 static inline void __free_reserved_page(struct page *page) 1868 { 1869 ClearPageReserved(page); 1870 init_page_count(page); 1871 __free_page(page); 1872 } 1873 1874 static inline void free_reserved_page(struct page *page) 1875 { 1876 __free_reserved_page(page); 1877 adjust_managed_page_count(page, 1); 1878 } 1879 1880 static inline void mark_page_reserved(struct page *page) 1881 { 1882 SetPageReserved(page); 1883 adjust_managed_page_count(page, -1); 1884 } 1885 1886 /* 1887 * Default method to free all the __init memory into the buddy system. 1888 * The freed pages will be poisoned with pattern "poison" if it's within 1889 * range [0, UCHAR_MAX]. 1890 * Return pages freed into the buddy system. 1891 */ 1892 static inline unsigned long free_initmem_default(int poison) 1893 { 1894 extern char __init_begin[], __init_end[]; 1895 1896 return free_reserved_area(&__init_begin, &__init_end, 1897 poison, "unused kernel"); 1898 } 1899 1900 static inline unsigned long get_num_physpages(void) 1901 { 1902 int nid; 1903 unsigned long phys_pages = 0; 1904 1905 for_each_online_node(nid) 1906 phys_pages += node_present_pages(nid); 1907 1908 return phys_pages; 1909 } 1910 1911 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1912 /* 1913 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its 1914 * zones, allocate the backing mem_map and account for memory holes in a more 1915 * architecture independent manner. This is a substitute for creating the 1916 * zone_sizes[] and zholes_size[] arrays and passing them to 1917 * free_area_init_node() 1918 * 1919 * An architecture is expected to register range of page frames backed by 1920 * physical memory with memblock_add[_node]() before calling 1921 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic 1922 * usage, an architecture is expected to do something like 1923 * 1924 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, 1925 * max_highmem_pfn}; 1926 * for_each_valid_physical_page_range() 1927 * memblock_add_node(base, size, nid) 1928 * free_area_init_nodes(max_zone_pfns); 1929 * 1930 * free_bootmem_with_active_regions() calls free_bootmem_node() for each 1931 * registered physical page range. Similarly 1932 * sparse_memory_present_with_active_regions() calls memory_present() for 1933 * each range when SPARSEMEM is enabled. 1934 * 1935 * See mm/page_alloc.c for more information on each function exposed by 1936 * CONFIG_HAVE_MEMBLOCK_NODE_MAP. 
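 *
 * As a slightly more concrete but still illustrative sketch (the DMA
 * cut-off MY_MAX_DMA_PFN and the use of max_pfn are arch-specific
 * placeholders, and ZONE_DMA only exists on configurations that enable it):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *	max_zone_pfns[ZONE_DMA]    = min(MY_MAX_DMA_PFN, max_pfn);
 *	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 *	free_area_init_nodes(max_zone_pfns);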
1937 */ 1938 extern void free_area_init_nodes(unsigned long *max_zone_pfn); 1939 unsigned long node_map_pfn_alignment(void); 1940 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 1941 unsigned long end_pfn); 1942 extern unsigned long absent_pages_in_range(unsigned long start_pfn, 1943 unsigned long end_pfn); 1944 extern void get_pfn_range_for_nid(unsigned int nid, 1945 unsigned long *start_pfn, unsigned long *end_pfn); 1946 extern unsigned long find_min_pfn_with_active_regions(void); 1947 extern void free_bootmem_with_active_regions(int nid, 1948 unsigned long max_low_pfn); 1949 extern void sparse_memory_present_with_active_regions(int nid); 1950 1951 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 1952 1953 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1954 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1955 static inline int __early_pfn_to_nid(unsigned long pfn, 1956 struct mminit_pfnnid_cache *state) 1957 { 1958 return 0; 1959 } 1960 #else 1961 /* please see mm/page_alloc.c */ 1962 extern int __meminit early_pfn_to_nid(unsigned long pfn); 1963 /* there is a per-arch backend function. */ 1964 extern int __meminit __early_pfn_to_nid(unsigned long pfn, 1965 struct mminit_pfnnid_cache *state); 1966 #endif 1967 1968 extern void set_dma_reserve(unsigned long new_dma_reserve); 1969 extern void memmap_init_zone(unsigned long, int, unsigned long, 1970 unsigned long, enum memmap_context); 1971 extern void setup_per_zone_wmarks(void); 1972 extern int __meminit init_per_zone_wmark_min(void); 1973 extern void mem_init(void); 1974 extern void __init mmap_init(void); 1975 extern void show_mem(unsigned int flags, nodemask_t *nodemask); 1976 extern long si_mem_available(void); 1977 extern void si_meminfo(struct sysinfo * val); 1978 extern void si_meminfo_node(struct sysinfo *val, int nid); 1979 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES 1980 extern unsigned long arch_reserved_kernel_pages(void); 1981 #endif 1982 1983 extern __printf(3, 4) 1984 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); 1985 1986 extern void setup_per_cpu_pageset(void); 1987 1988 extern void zone_pcp_update(struct zone *zone); 1989 extern void zone_pcp_reset(struct zone *zone); 1990 1991 /* page_alloc.c */ 1992 extern int min_free_kbytes; 1993 extern int watermark_scale_factor; 1994 1995 /* nommu.c */ 1996 extern atomic_long_t mmap_pages_allocated; 1997 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); 1998 1999 /* interval_tree.c */ 2000 void vma_interval_tree_insert(struct vm_area_struct *node, 2001 struct rb_root *root); 2002 void vma_interval_tree_insert_after(struct vm_area_struct *node, 2003 struct vm_area_struct *prev, 2004 struct rb_root *root); 2005 void vma_interval_tree_remove(struct vm_area_struct *node, 2006 struct rb_root *root); 2007 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root, 2008 unsigned long start, unsigned long last); 2009 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, 2010 unsigned long start, unsigned long last); 2011 2012 #define vma_interval_tree_foreach(vma, root, start, last) \ 2013 for (vma = vma_interval_tree_iter_first(root, start, last); \ 2014 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 2015 2016 void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 2017 struct rb_root *root); 2018 void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 2019 struct rb_root *root); 2020 struct anon_vma_chain *anon_vma_interval_tree_iter_first( 2021 struct 
rb_root *root, unsigned long start, unsigned long last); 2022 struct anon_vma_chain *anon_vma_interval_tree_iter_next( 2023 struct anon_vma_chain *node, unsigned long start, unsigned long last); 2024 #ifdef CONFIG_DEBUG_VM_RB 2025 void anon_vma_interval_tree_verify(struct anon_vma_chain *node); 2026 #endif 2027 2028 #define anon_vma_interval_tree_foreach(avc, root, start, last) \ 2029 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ 2030 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) 2031 2032 /* mmap.c */ 2033 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); 2034 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, 2035 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, 2036 struct vm_area_struct *expand); 2037 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, 2038 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) 2039 { 2040 return __vma_adjust(vma, start, end, pgoff, insert, NULL); 2041 } 2042 extern struct vm_area_struct *vma_merge(struct mm_struct *, 2043 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 2044 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 2045 struct mempolicy *, struct vm_userfaultfd_ctx); 2046 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 2047 extern int __split_vma(struct mm_struct *, struct vm_area_struct *, 2048 unsigned long addr, int new_below); 2049 extern int split_vma(struct mm_struct *, struct vm_area_struct *, 2050 unsigned long addr, int new_below); 2051 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); 2052 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, 2053 struct rb_node **, struct rb_node *); 2054 extern void unlink_file_vma(struct vm_area_struct *); 2055 extern struct vm_area_struct *copy_vma(struct vm_area_struct **, 2056 unsigned long addr, unsigned long len, pgoff_t pgoff, 2057 bool *need_rmap_locks); 2058 extern void exit_mmap(struct mm_struct *); 2059 2060 static inline int check_data_rlimit(unsigned long rlim, 2061 unsigned long new, 2062 unsigned long start, 2063 unsigned long end_data, 2064 unsigned long start_data) 2065 { 2066 if (rlim < RLIM_INFINITY) { 2067 if (((new - start) + (end_data - start_data)) > rlim) 2068 return -ENOSPC; 2069 } 2070 2071 return 0; 2072 } 2073 2074 extern int mm_take_all_locks(struct mm_struct *mm); 2075 extern void mm_drop_all_locks(struct mm_struct *mm); 2076 2077 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 2078 extern struct file *get_mm_exe_file(struct mm_struct *mm); 2079 extern struct file *get_task_exe_file(struct task_struct *task); 2080 2081 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); 2082 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); 2083 2084 extern bool vma_is_special_mapping(const struct vm_area_struct *vma, 2085 const struct vm_special_mapping *sm); 2086 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, 2087 unsigned long addr, unsigned long len, 2088 unsigned long flags, 2089 const struct vm_special_mapping *spec); 2090 /* This is an obsolete alternative to _install_special_mapping. 
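 *
 * New callers should prefer _install_special_mapping().  A typical,
 * purely illustrative use in the style of a vDSO-like mapping (the
 * my_* names are placeholders, not definitions from this header):
 *
 *	static const struct vm_special_mapping my_special_mapping = {
 *		.name  = "[my_special]",
 *		.pages = my_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, len,
 *				       VM_READ|VM_MAYREAD,
 *				       &my_special_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);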
*/ 2091 extern int install_special_mapping(struct mm_struct *mm, 2092 unsigned long addr, unsigned long len, 2093 unsigned long flags, struct page **pages); 2094 2095 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 2096 2097 extern unsigned long mmap_region(struct file *file, unsigned long addr, 2098 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2099 struct list_head *uf); 2100 extern unsigned long do_mmap(struct file *file, unsigned long addr, 2101 unsigned long len, unsigned long prot, unsigned long flags, 2102 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, 2103 struct list_head *uf); 2104 extern int do_munmap(struct mm_struct *, unsigned long, size_t, 2105 struct list_head *uf); 2106 2107 static inline unsigned long 2108 do_mmap_pgoff(struct file *file, unsigned long addr, 2109 unsigned long len, unsigned long prot, unsigned long flags, 2110 unsigned long pgoff, unsigned long *populate, 2111 struct list_head *uf) 2112 { 2113 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); 2114 } 2115 2116 #ifdef CONFIG_MMU 2117 extern int __mm_populate(unsigned long addr, unsigned long len, 2118 int ignore_errors); 2119 static inline void mm_populate(unsigned long addr, unsigned long len) 2120 { 2121 /* Ignore errors */ 2122 (void) __mm_populate(addr, len, 1); 2123 } 2124 #else 2125 static inline void mm_populate(unsigned long addr, unsigned long len) {} 2126 #endif 2127 2128 /* These take the mm semaphore themselves */ 2129 extern int __must_check vm_brk(unsigned long, unsigned long); 2130 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); 2131 extern int vm_munmap(unsigned long, size_t); 2132 extern unsigned long __must_check vm_mmap(struct file *, unsigned long, 2133 unsigned long, unsigned long, 2134 unsigned long, unsigned long); 2135 2136 struct vm_unmapped_area_info { 2137 #define VM_UNMAPPED_AREA_TOPDOWN 1 2138 unsigned long flags; 2139 unsigned long length; 2140 unsigned long low_limit; 2141 unsigned long high_limit; 2142 unsigned long align_mask; 2143 unsigned long align_offset; 2144 }; 2145 2146 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); 2147 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); 2148 2149 /* 2150 * Search for an unmapped address range. 2151 * 2152 * We are looking for a range that: 2153 * - does not intersect with any VMA; 2154 * - is contained within the [low_limit, high_limit) interval; 2155 * - is at least the desired size. 
2156 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 2157 */ 2158 static inline unsigned long 2159 vm_unmapped_area(struct vm_unmapped_area_info *info) 2160 { 2161 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) 2162 return unmapped_area_topdown(info); 2163 else 2164 return unmapped_area(info); 2165 } 2166 2167 /* truncate.c */ 2168 extern void truncate_inode_pages(struct address_space *, loff_t); 2169 extern void truncate_inode_pages_range(struct address_space *, 2170 loff_t lstart, loff_t lend); 2171 extern void truncate_inode_pages_final(struct address_space *); 2172 2173 /* generic vm_area_ops exported for stackable file systems */ 2174 extern int filemap_fault(struct vm_fault *vmf); 2175 extern void filemap_map_pages(struct vm_fault *vmf, 2176 pgoff_t start_pgoff, pgoff_t end_pgoff); 2177 extern int filemap_page_mkwrite(struct vm_fault *vmf); 2178 2179 /* mm/page-writeback.c */ 2180 int write_one_page(struct page *page, int wait); 2181 void task_dirty_inc(struct task_struct *tsk); 2182 2183 /* readahead.c */ 2184 #define VM_MAX_READAHEAD 128 /* kbytes */ 2185 #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ 2186 2187 int force_page_cache_readahead(struct address_space *mapping, struct file *filp, 2188 pgoff_t offset, unsigned long nr_to_read); 2189 2190 void page_cache_sync_readahead(struct address_space *mapping, 2191 struct file_ra_state *ra, 2192 struct file *filp, 2193 pgoff_t offset, 2194 unsigned long size); 2195 2196 void page_cache_async_readahead(struct address_space *mapping, 2197 struct file_ra_state *ra, 2198 struct file *filp, 2199 struct page *pg, 2200 pgoff_t offset, 2201 unsigned long size); 2202 2203 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 2204 extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 2205 2206 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ 2207 extern int expand_downwards(struct vm_area_struct *vma, 2208 unsigned long address); 2209 #if VM_GROWSUP 2210 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 2211 #else 2212 #define expand_upwards(vma, address) (0) 2213 #endif 2214 2215 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 2216 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 2217 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, 2218 struct vm_area_struct **pprev); 2219 2220 /* Look up the first VMA which intersects the interval start_addr..end_addr-1, 2221 NULL if none. Assume start_addr < end_addr. */ 2222 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) 2223 { 2224 struct vm_area_struct * vma = find_vma(mm, start_addr); 2225 2226 if (vma && end_addr <= vma->vm_start) 2227 vma = NULL; 2228 return vma; 2229 } 2230 2231 static inline unsigned long vma_pages(struct vm_area_struct *vma) 2232 { 2233 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 2234 } 2235 2236 /* Look up the first VMA which exactly matches the interval vm_start ...
vm_end */ 2237 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, 2238 unsigned long vm_start, unsigned long vm_end) 2239 { 2240 struct vm_area_struct *vma = find_vma(mm, vm_start); 2241 2242 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) 2243 vma = NULL; 2244 2245 return vma; 2246 } 2247 2248 #ifdef CONFIG_MMU 2249 pgprot_t vm_get_page_prot(unsigned long vm_flags); 2250 void vma_set_page_prot(struct vm_area_struct *vma); 2251 #else 2252 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 2253 { 2254 return __pgprot(0); 2255 } 2256 static inline void vma_set_page_prot(struct vm_area_struct *vma) 2257 { 2258 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 2259 } 2260 #endif 2261 2262 #ifdef CONFIG_NUMA_BALANCING 2263 unsigned long change_prot_numa(struct vm_area_struct *vma, 2264 unsigned long start, unsigned long end); 2265 #endif 2266 2267 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 2268 int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 2269 unsigned long pfn, unsigned long size, pgprot_t); 2270 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2271 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2272 unsigned long pfn); 2273 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2274 unsigned long pfn, pgprot_t pgprot); 2275 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2276 pfn_t pfn); 2277 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 2278 2279 2280 struct page *follow_page_mask(struct vm_area_struct *vma, 2281 unsigned long address, unsigned int foll_flags, 2282 unsigned int *page_mask); 2283 2284 static inline struct page *follow_page(struct vm_area_struct *vma, 2285 unsigned long address, unsigned int foll_flags) 2286 { 2287 unsigned int unused_page_mask; 2288 return follow_page_mask(vma, address, foll_flags, &unused_page_mask); 2289 } 2290 2291 #define FOLL_WRITE 0x01 /* check pte is writable */ 2292 #define FOLL_TOUCH 0x02 /* mark page accessed */ 2293 #define FOLL_GET 0x04 /* do get_page on page */ 2294 #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 2295 #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 2296 #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO 2297 * and return without waiting upon it */ 2298 #define FOLL_POPULATE 0x40 /* fault in page */ 2299 #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ 2300 #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 2301 #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 2302 #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2303 #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2304 #define FOLL_MLOCK 0x1000 /* lock present pages */ 2305 #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2306 #define FOLL_COW 0x4000 /* internal GUP flag */ 2307 2308 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2309 void *data); 2310 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 2311 unsigned long size, pte_fn_t fn, void *data); 2312 2313 2314 #ifdef CONFIG_PAGE_POISONING 2315 extern bool page_poisoning_enabled(void); 2316 extern void kernel_poison_pages(struct page *page, int numpages, int enable); 2317 extern bool page_is_poisoned(struct page *page); 2318 #else 2319 static inline bool 
page_poisoning_enabled(void) { return false; } 2320 static inline void kernel_poison_pages(struct page *page, int numpages, 2321 int enable) { } 2322 static inline bool page_is_poisoned(struct page *page) { return false; } 2323 #endif 2324 2325 #ifdef CONFIG_DEBUG_PAGEALLOC 2326 extern bool _debug_pagealloc_enabled; 2327 extern void __kernel_map_pages(struct page *page, int numpages, int enable); 2328 2329 static inline bool debug_pagealloc_enabled(void) 2330 { 2331 return _debug_pagealloc_enabled; 2332 } 2333 2334 static inline void 2335 kernel_map_pages(struct page *page, int numpages, int enable) 2336 { 2337 if (!debug_pagealloc_enabled()) 2338 return; 2339 2340 __kernel_map_pages(page, numpages, enable); 2341 } 2342 #ifdef CONFIG_HIBERNATION 2343 extern bool kernel_page_present(struct page *page); 2344 #endif /* CONFIG_HIBERNATION */ 2345 #else /* CONFIG_DEBUG_PAGEALLOC */ 2346 static inline void 2347 kernel_map_pages(struct page *page, int numpages, int enable) {} 2348 #ifdef CONFIG_HIBERNATION 2349 static inline bool kernel_page_present(struct page *page) { return true; } 2350 #endif /* CONFIG_HIBERNATION */ 2351 static inline bool debug_pagealloc_enabled(void) 2352 { 2353 return false; 2354 } 2355 #endif /* CONFIG_DEBUG_PAGEALLOC */ 2356 2357 #ifdef __HAVE_ARCH_GATE_AREA 2358 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 2359 extern int in_gate_area_no_mm(unsigned long addr); 2360 extern int in_gate_area(struct mm_struct *mm, unsigned long addr); 2361 #else 2362 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 2363 { 2364 return NULL; 2365 } 2366 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } 2367 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) 2368 { 2369 return 0; 2370 } 2371 #endif /* __HAVE_ARCH_GATE_AREA */ 2372 2373 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); 2374 2375 #ifdef CONFIG_SYSCTL 2376 extern int sysctl_drop_caches; 2377 int drop_caches_sysctl_handler(struct ctl_table *, int, 2378 void __user *, size_t *, loff_t *); 2379 #endif 2380 2381 void drop_slab(void); 2382 void drop_slab_node(int nid); 2383 2384 #ifndef CONFIG_MMU 2385 #define randomize_va_space 0 2386 #else 2387 extern int randomize_va_space; 2388 #endif 2389 2390 const char * arch_vma_name(struct vm_area_struct *vma); 2391 void print_vma_addr(char *prefix, unsigned long rip); 2392 2393 void sparse_mem_maps_populate_node(struct page **map_map, 2394 unsigned long pnum_begin, 2395 unsigned long pnum_end, 2396 unsigned long map_count, 2397 int nodeid); 2398 2399 struct page *sparse_mem_map_populate(unsigned long pnum, int nid); 2400 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 2401 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); 2402 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); 2403 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 2404 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); 2405 void *vmemmap_alloc_block(unsigned long size, int node); 2406 struct vmem_altmap; 2407 void *__vmemmap_alloc_block_buf(unsigned long size, int node, 2408 struct vmem_altmap *altmap); 2409 static inline void *vmemmap_alloc_block_buf(unsigned long size, int node) 2410 { 2411 return __vmemmap_alloc_block_buf(size, node, NULL); 2412 } 2413 2414 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 2415 int vmemmap_populate_basepages(unsigned long start, unsigned long end, 2416 int node); 2417 
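/*
 * On architectures with no special vmemmap requirements, the arch hook
 * below is often little more than a wrapper around
 * vmemmap_populate_basepages().  Illustrative sketch only; real
 * implementations live in arch code and may map the vmemmap with huge
 * pages instead:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */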
int vmemmap_populate(unsigned long start, unsigned long end, int node); 2418 void vmemmap_populate_print_last(void); 2419 #ifdef CONFIG_MEMORY_HOTPLUG 2420 void vmemmap_free(unsigned long start, unsigned long end); 2421 #endif 2422 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, 2423 unsigned long size); 2424 2425 enum mf_flags { 2426 MF_COUNT_INCREASED = 1 << 0, 2427 MF_ACTION_REQUIRED = 1 << 1, 2428 MF_MUST_KILL = 1 << 2, 2429 MF_SOFT_OFFLINE = 1 << 3, 2430 }; 2431 extern int memory_failure(unsigned long pfn, int trapno, int flags); 2432 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2433 extern int unpoison_memory(unsigned long pfn); 2434 extern int get_hwpoison_page(struct page *page); 2435 #define put_hwpoison_page(page) put_page(page) 2436 extern int sysctl_memory_failure_early_kill; 2437 extern int sysctl_memory_failure_recovery; 2438 extern void shake_page(struct page *p, int access); 2439 extern atomic_long_t num_poisoned_pages; 2440 extern int soft_offline_page(struct page *page, int flags); 2441 2442 2443 /* 2444 * Error handlers for various types of pages. 2445 */ 2446 enum mf_result { 2447 MF_IGNORED, /* Error: cannot be handled */ 2448 MF_FAILED, /* Error: handling failed */ 2449 MF_DELAYED, /* Will be handled later */ 2450 MF_RECOVERED, /* Successfully recovered */ 2451 }; 2452 2453 enum mf_action_page_type { 2454 MF_MSG_KERNEL, 2455 MF_MSG_KERNEL_HIGH_ORDER, 2456 MF_MSG_SLAB, 2457 MF_MSG_DIFFERENT_COMPOUND, 2458 MF_MSG_POISONED_HUGE, 2459 MF_MSG_HUGE, 2460 MF_MSG_FREE_HUGE, 2461 MF_MSG_UNMAP_FAILED, 2462 MF_MSG_DIRTY_SWAPCACHE, 2463 MF_MSG_CLEAN_SWAPCACHE, 2464 MF_MSG_DIRTY_MLOCKED_LRU, 2465 MF_MSG_CLEAN_MLOCKED_LRU, 2466 MF_MSG_DIRTY_UNEVICTABLE_LRU, 2467 MF_MSG_CLEAN_UNEVICTABLE_LRU, 2468 MF_MSG_DIRTY_LRU, 2469 MF_MSG_CLEAN_LRU, 2470 MF_MSG_TRUNCATED_LRU, 2471 MF_MSG_BUDDY, 2472 MF_MSG_BUDDY_2ND, 2473 MF_MSG_UNKNOWN, 2474 }; 2475 2476 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2477 extern void clear_huge_page(struct page *page, 2478 unsigned long addr, 2479 unsigned int pages_per_huge_page); 2480 extern void copy_user_huge_page(struct page *dst, struct page *src, 2481 unsigned long addr, struct vm_area_struct *vma, 2482 unsigned int pages_per_huge_page); 2483 extern long copy_huge_page_from_user(struct page *dst_page, 2484 const void __user *usr_src, 2485 unsigned int pages_per_huge_page, 2486 bool allow_pagefault); 2487 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 2488 2489 extern struct page_ext_operations debug_guardpage_ops; 2490 2491 #ifdef CONFIG_DEBUG_PAGEALLOC 2492 extern unsigned int _debug_guardpage_minorder; 2493 extern bool _debug_guardpage_enabled; 2494 2495 static inline unsigned int debug_guardpage_minorder(void) 2496 { 2497 return _debug_guardpage_minorder; 2498 } 2499 2500 static inline bool debug_guardpage_enabled(void) 2501 { 2502 return _debug_guardpage_enabled; 2503 } 2504 2505 static inline bool page_is_guard(struct page *page) 2506 { 2507 struct page_ext *page_ext; 2508 2509 if (!debug_guardpage_enabled()) 2510 return false; 2511 2512 page_ext = lookup_page_ext(page); 2513 if (unlikely(!page_ext)) 2514 return false; 2515 2516 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); 2517 } 2518 #else 2519 static inline unsigned int debug_guardpage_minorder(void) { return 0; } 2520 static inline bool debug_guardpage_enabled(void) { return false; } 2521 static inline bool page_is_guard(struct page *page) { return false; } 2522 #endif /* 
CONFIG_DEBUG_PAGEALLOC */ 2523 2524 #if MAX_NUMNODES > 1 2525 void __init setup_nr_node_ids(void); 2526 #else 2527 static inline void setup_nr_node_ids(void) {} 2528 #endif 2529 2530 #endif /* __KERNEL__ */ 2531 #endif /* _LINUX_MM_H */ 2532