#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>

struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

extern unsigned long mmap_min_addr;

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

/*
 * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
 * disabled, then there's a single shared list of VMAs maintained by the
 * system, and mm's subscribe to these individually
 */
struct vm_list_struct {
	struct vm_list_struct	*next;
	struct vm_area_struct	*vma;
};

#ifndef CONFIG_MMU
extern struct rb_root nommu_vma_tree;
extern struct rw_semaphore nommu_vma_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
#define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */

#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * special vmas that are non-mergeable, non-mlock()able
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */

/*
 * This interface is used by x86 PAT code to identify a pfn mapping that is
 * linear over the entire vma. This is to optimize PAT code that deals with
 * marking the physical region with a particular prot. This is not for generic
 * mm use. Note also that this check will not work if the pfn mapping is
 * linear for a vma starting at physical address 0, in which case PAT code
 * falls back to the slow path of reserving the physical range page by page.
 */
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
	return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
}

static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_PFNMAP);
}
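
/*
 * Illustrative sketch (not part of this header's API): how PAT-style code
 * might consume the helpers above.  For a vma that maps a physical range
 * linearly, vm_pgoff is assumed to hold the starting pfn (which is exactly
 * what the vm_pgoff test in is_linear_pfn_mapping() relies on); the
 * reserve_whole_range() call below is a hypothetical placeholder.
 *
 *	if (is_linear_pfn_mapping(vma)) {
 *		unsigned long pfn    = vma->vm_pgoff;
 *		unsigned long npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 *
 *		reserve_whole_range(pfn, npages);	// one call for the whole vma
 *	} else {
 *		// fall back to reserving the range page by page
 *	}
 */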

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
 * mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		       const nodemask_t *to, unsigned long flags);
#endif
};

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */
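
/*
 * A minimal sketch of the pattern described above (illustrative only, not a
 * real kernel path): take an extra reference so the page cannot be freed
 * while we operate on it, then drop that reference when done.  get_page()
 * and put_page() are declared further down in this header;
 * do_something_with() is a hypothetical placeholder.
 *
 *	get_page(page);			// pin: page->_count goes up by one
 *	do_something_with(page);	// critical section
 *	put_page(page);			// unpin: may free the page if this
 *					// was the last reference
 */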

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	return atomic_inc_not_zero(&page->_count);
}

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Set up the page count before the page is freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
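
/*
 * Illustrative sketch of how the compound-page accessors above fit together
 * (loosely modelled on the allocator side; my_dtor is a hypothetical
 * destructor, not an existing symbol).  The order and the destructor are
 * stashed in the first tail page, so they are only meaningful on the head
 * of a PG_compound page:
 *
 *	// when building a compound page of 2^order pages:
 *	set_compound_page_dtor(page, my_dtor);
 *	set_compound_order(page, order);
 *
 *	// when tearing it down:
 *	compound_page_dtor *dtor = get_compound_page_dtor(page);
 *	(*dtor)(page);		// typically frees the whole
 *				// 1 << compound_order(page) block
 */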

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */
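
/*
 * Small worked example of the page->index convention described above
 * (illustrative; PAGE_CACHE_SHIFT comes from <linux/pagemap.h>, which is
 * not included by this header): for an inode-backed pagecache page, the
 * byte offset of the page's data within the file is
 *
 *	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 *
 * i.e. index 0 covers the first PAGE_CACHE_SIZE bytes of the file, index 1
 * the next PAGE_CACHE_SIZE bytes, and so on.
 */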

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */


/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
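
/*
 * Worked example of the packing above, for one hypothetical configuration
 * (64-bit, no sparsemem, NODES_SHIFT == 6, ZONES_SHIFT == 2, and nodes fit
 * in page->flags):
 *
 *	SECTIONS_WIDTH = 0,  NODES_WIDTH = 6,  ZONES_WIDTH = 2
 *	SECTIONS_PGOFF = 64, NODES_PGOFF = 58, ZONES_PGOFF  = 56
 *
 * so the node id lives in bits 63..58 and the zone in bits 57..56:
 *
 *	node = (page->flags >> 58) & 0x3f;
 *	zone = (page->flags >> 56) & 0x3;
 *
 * and the combined NODE:ZONE id used by the buddy allocator is
 * (page->flags >> 56) & 0xff, which is what page_zone_id() below returns.
 */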

/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really
 * identifying a zone since we could be using the section number
 * id if we have no node id available in page flags.
 * We guarantee only that it will return the same value for two
 * combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(struct page *page);
#else
static inline int page_to_nid(struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
#ifdef CONFIG_SECURITY
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
#endif
	return hint;
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
#ifdef CONFIG_SWAP
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else
#endif
	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

#ifdef CONFIG_SHMEM
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
static inline int shmem_lock(struct file *file, int lock,
			     struct user_struct *user)
{
	return 0;
}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);

int shmem_zero_setup(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern unsigned long shmem_get_unmapped_area(struct file *file,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags);
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			int write_access)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
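
/*
 * Illustrative sketch (loosely modelled on an architecture's do_page_fault,
 * with the surrounding locking and error paths omitted): how a caller is
 * expected to interpret the VM_FAULT_* bitmask returned by handle_mm_fault().
 * The labels are hypothetical and tsk stands for the faulting task.
 *
 *	fault = handle_mm_fault(mm, vma, address, write);
 *	if (unlikely(fault & VM_FAULT_ERROR)) {
 *		if (fault & VM_FAULT_OOM)
 *			goto out_of_memory;	// hypothetical label
 *		else if (fault & VM_FAULT_SIGBUS)
 *			goto do_sigbus;		// hypothetical label
 *	}
 *	if (fault & VM_FAULT_MAJOR)
 *		tsk->maj_flt++;
 *	else
 *		tsk->min_flt++;
 */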

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm (force=0 and doesn't return any vmas).
 *
 * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
 * can be made about locking. get_user_pages_fast is to be implemented in a
 * way that is advantageous (vs get_user_pages()) when the user memory area is
 * already faulted in and present in ptes. However if the pages have to be
 * faulted in, it may turn out to be slightly slower.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
 * look through the least-recently-used 'nr_to_scan' entries and
 * attempt to free them up.  It should return the number of objects
 * which remain in the cache.  If it returns -1, it means it cannot do
 * any scanning at this time (e.g. there is a risk of deadlock).
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 *
 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
 * querying the cache size, so a fastpath for that case is appropriate.
 */
struct shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;	/* seeks to recreate an obj */

	/* These are for internal use */
	struct list_head list;
	long nr;	/* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
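
/*
 * Minimal sketch of registering a shrinker with the interface above
 * (illustrative; my_cache_count/my_cache_evict and my_shrinker are
 * hypothetical names, not existing symbols):
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan == 0)
 *			return my_cache_count();	// just report the size
 *		if (!(gfp_mask & __GFP_FS))
 *			return -1;			// cannot scan safely now
 *		my_cache_evict(nr_to_scan);
 *		return my_cache_count();		// objects still cached
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	// at init:		register_shrinker(&my_shrinker);
 *	// before teardown:	unregister_shrinker(&my_shrinker);
 */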

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc_map(mm, pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
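
/*
 * The canonical way to use the lock/map helpers above when examining or
 * modifying a single pte (a sketch; the pgd/pud/pmd lookup and the actual
 * pte manipulation are elided):
 *
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	// *pte may be inspected or updated here, under ptl
 *	pte_unmap_unlock(pte, ptl);
 *
 * pte_alloc_map_lock() follows the same pattern but allocates the page
 * table first if the pmd entry is not yet present, and returns NULL if
 * that allocation fails.
 */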

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node()
 *
 * An architecture is expected to register the ranges of page frames backed by
 * physical memory with add_active_range() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. In basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	add_active_range(node_id, start_pfn, end_pfn)
 * free_area_init_nodes(max_zone_pfns);
 *
 * If the architecture guarantees that there are no holes in the ranges
 * registered with add_active_range(), free_bootmem_with_active_regions()
 * will call free_bootmem_node() for each registered physical page range.
 * Similarly sparse_memory_present_with_active_regions() calls
 * memory_present() for each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_ARCH_POPULATES_NODE_MAP
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_all_active_ranges(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_pages_min(void);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
#else
static inline void setup_per_cpu_pageset(void) {}
#endif

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
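
/*
 * Sketch of the vma_prio_tree_foreach() iterator declared above, as the
 * file-backed reverse-mapping code typically uses it: visit every vma that
 * maps a given page of a file (locking is the caller's problem and only
 * hinted at here).
 *
 *	struct vm_area_struct *vma;
 *	struct prio_tree_iter iter;
 *	pgoff_t pgoff = page->index;	// assuming a linearly mapped page
 *
 *	// with mapping->i_mmap_lock held:
 *	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 *		// vma maps the file range containing pgoff
 *	}
 */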

#ifdef CONFIG_PROC_FS
/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
extern void added_exe_file_vma(struct mm_struct *mm);
extern void removed_exe_file_vma(struct mm_struct *mm);
#else
static inline void added_exe_file_vma(struct mm_struct *mm)
{}

static inline void removed_exe_file_vma(struct mm_struct *mm)
{}
#endif /* CONFIG_PROC_FS */

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	unsigned int vm_flags, unsigned long pgoff,
	int accountable);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif
extern int expand_stack_downwards(struct vm_area_struct *vma,
				  unsigned long address);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);
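
/*
 * Note that find_vma() only guarantees addr < vma->vm_end; the address may
 * still lie below the returned vma.  A typical caller therefore does
 * something like this (sketch; mmap_sem must be held):
 *
 *	struct vm_area_struct *vma = find_vma(mm, addr);
 *
 *	if (!vma)
 *		return -EFAULT;		// above every mapping
 *	if (vma->vm_start > addr)
 *		return -EFAULT;		// addr falls in a hole
 *	// addr really is inside [vma->vm_start, vma->vm_end)
 */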

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

pgprot_t vm_get_page_prot(unsigned long vm_flags);
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);

struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern int debug_pagealloc_enabled;

extern void kernel_map_pages(struct page *page, int numpages, int enable);

static inline void enable_debug_pagealloc(void)
{
	debug_pagealloc_enabled = 1;
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline void enable_debug_pagealloc(void)
{
}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
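
/*
 * Sketch of the classic consumer of remap_pfn_range(), declared earlier in
 * this section: a character driver's ->mmap method exposing a physical
 * region to userspace (my_device_mmap and phys_base are hypothetical; error
 * handling is minimal).  This is also what typically sets VM_PFNMAP on the
 * vma, which the is_pfn_mapping()/is_linear_pfn_mapping() helpers near the
 * top of this header test for.
 *
 *	static int my_device_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = phys_base >> PAGE_SHIFT;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */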

unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
						unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

extern void *alloc_locked_buffer(size_t size);
extern void free_locked_buffer(void *buffer, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */