/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
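
/*
 * Illustrative sketch (not part of this header): a filesystem's writeback
 * completion path would typically record a failure roughly like this.  The
 * helper name myfs_end_writeback() is hypothetical.
 *
 *	static void myfs_end_writeback(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 *
 * A later fsync(2) on any file open against this mapping then observes the
 * error through the errseq_t tracking recorded above.
 */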
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
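
/*
 * Minimal sketch of the lookup-side pattern described above (steps 1-3),
 * modelled on find_get_entry().  Illustrative only: the shadow-entry and
 * xas_retry() handling a real lookup must do is elided.
 *
 *	static struct page *lookup_sketch(struct address_space *mapping,
 *					  pgoff_t index)
 *	{
 *		XA_STATE(xas, &mapping->i_pages, index);
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		xas_reset(&xas);
 *		page = xas_load(&xas);				// 1. find page
 *		if (!page)
 *			goto out;
 *		if (!page_cache_get_speculative(page))		// 2. conditional ref
 *			goto repeat;
 *		if (unlikely(page != xas_reload(&xas))) {	// 3. recheck
 *			put_page(page);
 *			goto repeat;
 *		}
 *	out:
 *		rcu_read_unlock();
 *		return page;
 *	}
 */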
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);

	return data;
}
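
/*
 * Illustrative sketch (not part of this header): a filesystem tracking
 * per-page state pairs these helpers, attaching the state when the page is
 * first used and detaching it in ->releasepage.  "struct myfs_page_state"
 * and the function names are hypothetical; allocation-failure handling is
 * elided.
 *
 *	static void myfs_init_page(struct page *page)
 *	{
 *		struct myfs_page_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_NOFS);
 *		attach_page_private(page, state);
 *	}
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		kfree(detach_page_private(page));
 *		return 1;
 *	}
 */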
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
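
/*
 * Minimal usage sketch (illustrative only): get or create the page covering
 * byte offset @pos, fill it if it is not yet up to date, then release it.
 * myfs_read_into_page() is a hypothetical helper; error handling is elided.
 *
 *	struct page *page = find_or_create_page(mapping, pos >> PAGE_SHIFT,
 *						 mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	if (!PageUptodate(page) && !myfs_read_into_page(page))
 *		SetPageUptodate(page);
 *	unlock_page(page);
 *	put_page(page);
 */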
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (hpage_nr_pages(head) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
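
/*
 * Illustrative sketch: reading one page of a directory (or other metadata)
 * through the page cache, as several simple filesystems do.  Error handling
 * beyond the IS_ERR() check is elided.
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, n, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... examine the contents via kmap(page) or page_address(page) ...
 *	put_page(page);
 */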
/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->page != key->page)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
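
/*
 * Minimal sketch of the usual "lock and revalidate" pattern: a page looked
 * up without the page lock may be truncated before the caller manages to
 * lock it, so page->mapping must be rechecked once lock_page() succeeds.
 * If the page was truncated it is dropped and the lookup repeated (the
 * repeat label is not shown).
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		lock_page(page);
 *		if (unlikely(page->mapping != mapping)) {
 *			unlock_page(page);
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 */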
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_async - Lock the page, unless this would block. If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
				  struct wait_page_queue *wait)
{
	if (!trylock_page(page))
		return __lock_page_async(page, wait);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
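
/*
 * Usage sketch (illustrative): before copying file data to a user buffer
 * while holding locks that a page fault must not take, fault the destination
 * range in up front so the later copy is unlikely to fault.
 *
 *	if (unlikely(fault_in_pages_writeable(ubuf, len)))
 *		return -EFAULT;
 *	... take the locks and copy to @ubuf; if the copy faults anyway,
 *	    drop the locks and retry ...
 *
 * fault_in_pages_readable() below is the counterpart for a source buffer
 * that will only be read from.
 */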
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
		struct file *, pgoff_t index, unsigned long req_count);
void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
		struct file *, struct page *, pgoff_t index,
		unsigned long req_count);
void page_cache_readahead_unbounded(struct address_space *, struct file *,
		pgoff_t index, unsigned long nr_to_read,
		unsigned long lookahead_count);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = hpage_nr_pages(page);

	return page;
}
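
/*
 * Illustrative sketch of an ->readahead implementation using the accessor
 * above.  myfs_start_read() is a hypothetical helper that submits the I/O
 * and arranges for unlock_page() and put_page() on completion.
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac)))
 *			myfs_start_read(rac->file, page);
 *	}
 */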
static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += hpage_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index.  This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock each page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
	return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
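
/*
 * Usage sketch for the helper above (illustrative only): a ->page_mkwrite
 * handler typically locks the page, checks for a racing truncate, and then
 * returns with the page still locked.  myfs_prepare_write() is hypothetical.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int len;
 *
 *		lock_page(page);
 *		len = page_mkwrite_check_truncate(page, inode);
 *		if (len < 0) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		myfs_prepare_write(page, len);
 *		return VM_FAULT_LOCKED;
 *	}
 */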
#endif /* _LINUX_PAGEMAP_H */