#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

/*
 * Used to prevent common memory management code from establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this limit via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF, the number of sections is represented as an
 * unsigned short, so the number of sections must be smaller than 65535 at
 * coredump time. Because the kernel adds some informative sections to the
 * program image when generating a coredump, we need some margin. The number
 * of extra sections is currently 1-3 and depends on the arch; we use "5" as
 * a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools may be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
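/*
 * Example (illustrative sketch, not part of this header): rounding an
 * arbitrary address up to the next page boundary and testing the result.
 * With 4 KiB pages, 0x1234 aligns up to 0x2000.
 *
 *	unsigned long addr = 0x1234;
 *
 *	addr = PAGE_ALIGN(addr);	// 0x2000 when PAGE_SIZE == 4096
 *	BUG_ON(!PAGE_ALIGNED(addr));	// now a multiple of PAGE_SIZE
 */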
/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];
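/*
 * Example (illustrative sketch): the index into protection_map[] is just
 * the low four vm_flags bits, i.e. VM_READ|VM_WRITE|VM_EXEC|VM_SHARED.
 * A private read/write mapping therefore selects entry 0b0011:
 *
 *	pgprot_t prot;
 *
 *	prot = protection_map[(VM_READ | VM_WRITE) &
 *			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 */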
#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	void *entry;			/* ->fault handler can alternatively
					 * return locked DAX entry. In that
					 * case handler should return
					 * VM_FAULT_DAX_LOCKED and fill in
					 * entry here.
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* pte entry associated with ->pgoff */
};
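/*
 * Example (illustrative sketch): distinguishing read from write faults and
 * first attempts from retries inside a ->fault handler:
 *
 *	if (vmf->flags & FAULT_FLAG_WRITE)
 *		; // the faulting access was a write
 *	if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
 *	    !(vmf->flags & FAULT_FLAG_TRIED))
 *		; // first attempt; the handler may drop mmap_sem and retry
 */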
/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
						pmd_t *, unsigned int flags);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
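/*
 * Example (illustrative sketch, names hypothetical): a minimal driver-style
 * implementation that serves a single page it allocated earlier. The handler
 * returns 0 or a mask of VM_FAULT_xxx bits (defined later in this header):
 *
 *	static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page = vma->vm_private_data;
 *
 *		if (vmf->pgoff != 0)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);		// core drops this ref for us later
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_vm_fault,
 *	};
 *
 * An mmap() file operation would then set vma->vm_ops = &my_vm_ops.
 */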
struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
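/*
 * Example (illustrative sketch): the classic speculative-reference pattern.
 * A lockless lookup can race with the page being freed, so the reference
 * must be taken with get_page_unless_zero() and the result re-validated:
 *
 *	page = lockless_lookup(key);	// hypothetical lockless lookup
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;		// lost the race; page was being freed
 */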
extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);
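/*
 * Example (illustrative sketch): computing the size of a (possibly)
 * compound page. compound_order() returns 0 for anything that is not a
 * head page, so callers are expected to pass the head:
 *
 *	struct page *head = compound_head(page);
 *	unsigned long nr_pages = 1UL << compound_order(head);
 *	unsigned long bytes = nr_pages << PAGE_SHIFT;
 */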
#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
		struct page *page, pte_t *pte, bool write, bool anon);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif
#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
void get_zone_device_page(struct page *page);
void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline void get_zone_device_page(struct page *page)
{
}
static inline void put_zone_device_page(struct page *page)
{
}
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))
		__put_page(page);

	if (unlikely(is_zone_device_page(page)))
		put_zone_device_page(page);
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
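/*
 * Example (illustrative sketch): holding a page across a sleeping operation.
 * get_page()/put_page() operate on the compound head internally, so the same
 * pattern works for tail pages of THP/hugetlb pages:
 *
 *	get_page(page);			// pin while we work on it
 *	err = my_slow_operation(page);	// hypothetical; may sleep
 *	put_page(page);			// may free the page if last ref
 */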
/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
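/*
 * Example (illustrative sketch, CONFIG_NUMA_BALANCING only): a cpupid packs
 * the last CPU and PID (each truncated to its mask width) into one int, and
 * the accessors above undo the packing:
 *
 *	int cpu = raw_smp_processor_id(), pid = current->pid;
 *	int cpupid = cpu_pid_to_cpupid(cpu, pid);
 *
 *	WARN_ON(cpupid_to_cpu(cpupid) != (cpu & LAST__CPU_MASK));
 *	WARN_ON(cpupid_to_pid(cpupid) != (pid & LAST__PID_MASK));
 */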
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif
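/*
 * Example (illustrative sketch): zeroing a lowmem page through its kernel
 * virtual address. For highmem pages page_address() can return NULL, in
 * which case a temporary mapping (kmap()) would be needed instead:
 *
 *	void *vaddr = page_address(page);
 *
 *	if (vaddr)
 *		memset(vaddr, 0, PAGE_SIZE);
 */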
extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

bool page_mapped(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
#define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);
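/*
 * Example (illustrative sketch): fault paths check the returned bitmask
 * rather than a plain error code. handle_mm_fault() is declared later in
 * this header:
 *
 *	int ret = handle_mm_fault(mm, vma, address, flags);
 *
 *	if (ret & VM_FAULT_ERROR) {
 *		if (ret & VM_FAULT_OOM)
 *			; // out of memory
 *		else if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 *			; // deliver a signal
 *	} else if (ret & VM_FAULT_MAJOR) {
 *		; // account a major fault
 *	}
 */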
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	bool ignore_dirty;			/* Ignore dirty pages */
	bool check_swap_entries;		/* Check also swap entries */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. A positive returned
 *             value means "do page table walk over the current vma,"
 *             and a negative one means "abort current page table walk
 *             right now." 0 means "skip the current vma."
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
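/*
 * Example (illustrative sketch, callback name hypothetical): counting
 * present PTEs in a range with walk_page_range(). The caller must hold
 * mmap_sem for reading around the call:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */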
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
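/*
 * Example (illustrative sketch): pinning user pages for I/O with
 * get_user_pages_fast() and releasing them afterwards. A short return
 * (fewer pages than requested) must be handled by the caller:
 *
 *	struct page *pages[16];
 *	int i, got;
 *
 *	got = get_user_pages_fast(user_addr, 16, 1, pages);	// write = 1
 *	if (got < 0)
 *		return got;		// -errno, nothing was pinned
 *	for (i = 0; i < got; i++) {
 *		// ... do the I/O on pages[i] ...
 *		put_page(pages[i]);	// drop the pin
 *	}
 */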
/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * pfns_vector_pages() or pfns_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     bool write, bool force, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
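/*
 * Example (illustrative sketch): pinning a user range with a frame_vector
 * and accessing the result as struct page pointers:
 *
 *	struct frame_vector *vec = frame_vector_create(nr);
 *	int ret;
 *
 *	if (!vec)
 *		return -ENOMEM;
 *	ret = get_vaddr_frames(start, nr, true, false, vec);
 *	if (ret > 0) {
 *		struct page **pages = frame_vector_pages(vec);
 *
 *		if (!IS_ERR(pages))
 *			; // use pages[0] .. pages[frame_vector_count(vec) - 1]
 *		put_vaddr_frames(vec);
 *	}
 *	frame_vector_destroy(vec);
 */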
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative, but a negative value is never what users expect to see.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}
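/*
 * Example (illustrative sketch): the counters above are kept in pages, so
 * reporting code converts with PAGE_SHIFT, e.g. resident set size in KiB:
 *
 *	unsigned long rss_kb = get_mm_rss(mm) << (PAGE_SHIFT - 10);
 *	unsigned long peak_kb = get_mm_hiwater_rss(mm) << (PAGE_SHIFT - 10);
 */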
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_nr_pmds_init(struct mm_struct *mm) {}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->nr_pmds, 0);
}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);
}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
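/*
 * Example (illustrative sketch): allocating the intermediate levels on the
 * way down to a PTE, as fault paths do. Each helper returns NULL on
 * allocation failure and is a no-op at folded levels:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pud_t *pud = pud_alloc(mm, pgd, address);
 *	pmd_t *pmd;
 *
 *	if (!pud)
 *		return VM_FAULT_OOM;
 *	pmd = pmd_alloc(mm, pud, address);
 *	if (!pmd)
 *		return VM_FAULT_OOM;
 */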
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it into use in between.
	 *
	 * It can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
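/*
 * Example (illustrative sketch): mapping and locking a PTE, then releasing
 * it again. This is the canonical pairing for the macros above:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (pte_present(*pte))
 *		; // examine or update the entry under the lock
 *	pte_unmap_unlock(pte, ptl);
 */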
#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
        unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
        return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
        return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        page->pmd_huge_pte = NULL;
#endif
        return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
        ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
        return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lockptr(mm, pmd);
        spin_lock(ptl);
        return ptl;
}

extern void free_area_init(unsigned long *zones_size);
extern void free_area_init_node(int nid, unsigned long *zones_size,
                unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within the range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system.  The freed pages will be poisoned with the pattern
 * "poison" if it is within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
                                        int poison, char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
        __free_reserved_page(page);
        adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
        SetPageReserved(page);
        adjust_managed_page_count(page, -1);
}
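/*
 * Usage sketch (illustrative, not part of this header): code that stops
 * using a boot-time reserved region can hand its pages back to the buddy
 * allocator one by one, keeping the managed-page accounting straight.
 * Given a pfn range start_pfn..end_pfn:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		free_reserved_page(pfn_to_page(pfn));
 *
 * free_reserved_area() is the bulk variant for a virtually contiguous
 * [start, end) range, and additionally supports poisoning the freed
 * memory.
 */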
/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with the pattern "poison" if it is
 * within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
        extern char __init_begin[], __init_end[];

        return free_reserved_area(&__init_begin, &__init_end,
                                  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
        int nid;
        unsigned long phys_pages = 0;

        for_each_online_node(nid)
                phys_pages += node_present_pages(nid);

        return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner.  This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at.  In its most
 * basic usage, an architecture is expected to do something like
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 *						     max_highmem_pfn};
 *	for_each_valid_physical_page_range()
 *		memblock_add_node(base, size, nid)
 *	free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
                                      unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
                                           unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
                        unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
                                             unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
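/*
 * A slightly more concrete sketch of the initialisation described above
 * (hypothetical arch code, not part of this header), assuming the arch
 * has already populated memblock while parsing its firmware memory map:
 *
 *	static void __init hypothetical_arch_zone_init(void)
 *	{
 *		unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *		max_zone_pfns[ZONE_DMA]    = min(MAX_DMA_PFN, max_low_pfn);
 *		max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *		free_area_init_nodes(max_zone_pfns);
 *	}
 *
 * MAX_DMA_PFN here is an arch-specific constant; the memblock_add_node()
 * calls happen much earlier in boot.
 */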
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
                                     struct mminit_pfnnid_cache *state)
{
        return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
                             unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo *val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
                       const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
                              struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
                              struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
                                unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
                                unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)               \
        for (vma = vma_interval_tree_iter_first(root, start, last);     \
             vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
        struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
        struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)           \
        for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
             avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
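/*
 * Usage sketch (illustrative, not part of this header): reverse-mapping
 * code walks all VMAs that overlap a page-offset range of a file, with
 * the mapping's i_mmap_rwsem held by the caller.  Both endpoints of the
 * range are inclusive; for a single page:
 *
 *	struct vm_area_struct *vma;
 *	pgoff_t first = page->index;
 *	pgoff_t last = first;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
 *		...
 *	}
 *
 * anon_vma_interval_tree_foreach() is the analogous walk over the
 * anon_vma chains for anonymous pages.
 */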
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
        struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
        struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
        struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        unsigned long addr, unsigned long len, pgoff_t pgoff,
        bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
                                    unsigned long new,
                                    unsigned long start,
                                    unsigned long end_data,
                                    unsigned long start_data)
{
        if (rlim < RLIM_INFINITY) {
                if (((new - start) + (end_data - start_data)) > rlim)
                        return -ENOSPC;
        }

        return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags,
                                   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot, unsigned long flags,
        vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot, unsigned long flags,
        unsigned long pgoff, unsigned long *populate)
{
        return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
                         int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
        /* Ignore errors */
        (void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
                                          unsigned long, unsigned long,
                                          unsigned long, unsigned long);
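/*
 * Usage sketch (illustrative, not part of this header): unlike do_mmap(),
 * vm_mmap() and vm_munmap() take mmap_sem themselves, so they are the
 * natural entry points for in-kernel callers that already hold a
 * struct file reference:
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;		(errno-style failure)
 *	...
 *	vm_munmap(addr, len);
 */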
struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
        unsigned long flags;
        unsigned long length;
        unsigned long low_limit;
        unsigned long high_limit;
        unsigned long align_mask;
        unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
        if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
                return unmapped_area_topdown(info);
        else
                return unmapped_area(info);
}

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
                                       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD        128     /* kbytes */
#define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra,
                               struct file *filp,
                               pgoff_t offset,
                               unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
                                struct file_ra_state *ra,
                                struct file *filp,
                                struct page *pg,
                                pgoff_t offset,
                                unsigned long size);

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
                            unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
                                            struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, unsigned long start_addr, unsigned long end_addr)
{
        struct vm_area_struct *vma = find_vma(mm, start_addr);

        if (vma && end_addr <= vma->vm_start)
                vma = NULL;
        return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
                                unsigned long vm_start, unsigned long vm_end)
{
        struct vm_area_struct *vma = find_vma(mm, vm_start);

        if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
                vma = NULL;

        return vma;
}
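/*
 * Usage sketch (illustrative, not part of this header): find_vma() only
 * guarantees addr < vm_end, so a caller that needs "addr lies inside the
 * VMA" must also check vm_start, and must hold mmap_sem across the
 * lookup and any use of the result:
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr) {
 *		...
 *	}
 *	up_read(&mm->mmap_sem);
 */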
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
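/*
 * Usage sketch (illustrative, not part of this header): a character
 * driver's mmap handler typically maps its device memory with
 * remap_pfn_range(), reusing the protections the core already computed
 * into vma->vm_page_prot.  device_phys_base below is a made-up name for
 * the device's physical base address:
 *
 *	static int hypothetical_dev_mmap(struct file *filp,
 *					 struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = device_phys_base >> PAGE_SHIFT;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * vm_iomap_memory() is a simpler wrapper for the common "map this whole
 * physical range" case; vm_insert_page() is for normal pages the driver
 * owns.
 */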
struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int foll_flags,
                              unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int foll_flags)
{
        unsigned int unused_page_mask;
        return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}

#define FOLL_WRITE      0x01    /* check pte is writable */
#define FOLL_TOUCH      0x02    /* mark page accessed */
#define FOLL_GET        0x04    /* do get_page on page */
#define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
#define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
#define FOLL_POPULATE   0x40    /* fault in page */
#define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
#define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
#define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
#define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
#define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
#define FOLL_MLOCK      0x1000  /* lock present pages */
#define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
                               unsigned long size, pte_fn_t fn, void *data);


#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
                                       int enable) { }
static inline bool page_is_poisoned(struct page *page) { return false; }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
        return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled())
                return;

        __kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
static inline bool debug_pagealloc_enabled(void)
{
        return false;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
                               void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
                                   unsigned long pnum_begin,
                                   unsigned long pnum_end,
                                   unsigned long map_count,
                                   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *__vmemmap_alloc_block_buf(unsigned long size, int node,
                                struct vmem_altmap *altmap);
static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
{
        return __vmemmap_alloc_block_buf(size, node, NULL);
}

void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
                               int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
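/*
 * Usage sketch (illustrative, not part of this header): each
 * SPARSEMEM_VMEMMAP architecture provides vmemmap_populate(); one with
 * no large-page mapping support for the vmemmap can simply fall back to
 * base pages:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 *
 * vmemmap_verify() can then be used to warn when the backing memory
 * lands on a different node than the sections it describes.
 */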
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
                                  unsigned long size);

enum mf_flags {
        MF_COUNT_INCREASED = 1 << 0,
        MF_ACTION_REQUIRED = 1 << 1,
        MF_MUST_KILL = 1 << 2,
        MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page) put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);


/*
 * Error handlers for various types of pages.
 */
enum mf_result {
        MF_IGNORED,     /* Error: cannot be handled */
        MF_FAILED,      /* Error: handling failed */
        MF_DELAYED,     /* Will be handled later */
        MF_RECOVERED,   /* Successfully recovered */
};

enum mf_action_page_type {
        MF_MSG_KERNEL,
        MF_MSG_KERNEL_HIGH_ORDER,
        MF_MSG_SLAB,
        MF_MSG_DIFFERENT_COMPOUND,
        MF_MSG_POISONED_HUGE,
        MF_MSG_HUGE,
        MF_MSG_FREE_HUGE,
        MF_MSG_UNMAP_FAILED,
        MF_MSG_DIRTY_SWAPCACHE,
        MF_MSG_CLEAN_SWAPCACHE,
        MF_MSG_DIRTY_MLOCKED_LRU,
        MF_MSG_CLEAN_MLOCKED_LRU,
        MF_MSG_DIRTY_UNEVICTABLE_LRU,
        MF_MSG_CLEAN_UNEVICTABLE_LRU,
        MF_MSG_DIRTY_LRU,
        MF_MSG_CLEAN_LRU,
        MF_MSG_TRUNCATED_LRU,
        MF_MSG_BUDDY,
        MF_MSG_BUDDY_2ND,
        MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
                            unsigned long addr,
                            unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned long addr, struct vm_area_struct *vma,
                                unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;
extern struct page_ext_operations page_poisoning_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
        return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
        return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return false;

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return false;

        return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */