/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state.  That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages.  This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache.  Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty?  Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes.  The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_THP_SUPPORT = 6,	/* THPs supported */
};
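/*
 * Illustrative sketch of the shrinker-side protocol documented above;
 * this is hypothetical caller code, not part of this header, and
 * queue_inode_for_reclaim() is an invented helper.  The i_lock is held,
 * the i_pages lock is not:
 *
 *	spin_lock(&inode->i_lock);
 *	if (mapping_shrinkable(inode->i_mapping))
 *		queue_inode_for_reclaim(inode);
 *	spin_unlock(&inode->i_lock);
 */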
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

static inline bool mapping_thp_support(struct address_space *mapping)
{
	return test_bit(AS_THP_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);
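/*
 * Illustrative sketch (not a real kernel path): a filesystem's writeback
 * completion handler would typically record failures here so that a later
 * fsync(2) can report them.  my_fs_end_writeback() and its err argument
 * are hypothetical; mapping_set_error() is a no-op when err == 0:
 *
 *	static void my_fs_end_writeback(struct address_space *mapping, int err)
 *	{
 *		mapping_set_error(mapping, err);
 *	}
 */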
/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored.  This is different
 * from the mapping returned by folio_mapping().  The only reason to
 * use it is if, like NFS, you return 0 from ->swap_activate.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (unlikely(folio_test_swapcache(folio)))
		return NULL;
	return folio_mapping(folio);
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

static inline bool page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return folio_ref_try_add_rcu((struct folio *)page, count);
}

static inline bool page_cache_get_speculative(struct page *page)
{
	return page_cache_add_speculative(page, 1);
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}
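/*
 * Sketch of the private-data lifecycle ("struct my_state" is hypothetical;
 * folio_detach_private() is defined just below).  folio_attach_private()
 * takes a folio reference, so the data must be detached, dropping that
 * reference, before the folio can be freed:
 *
 *	struct my_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *	folio_attach_private(folio, state);
 *	...
 *	state = folio_detach_private(folio);
 *	kfree(state);
 */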
/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040
#define FGP_HEAD		0x00000080
#define FGP_ENTRY		0x00000100
#define FGP_STABLE		0x00000200

struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
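/*
 * Sketch: the FGP flags combine to describe a lookup.  A caller that wants
 * the folio locked, marked accessed, and created if absent would pass
 * (this mirrors what find_or_create_page() below does via
 * pagecache_get_page(); shown here only to illustrate flag usage):
 *
 *	folio = __filemap_get_folio(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (!folio)
 *		return -ENOMEM;
 */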
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (PageHuge(head))
		return head->index == index;
	return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}
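/*
 * Sketch of a typical find_or_create_page() caller (illustrative; error
 * handling beyond allocation failure elided).  The page comes back locked
 * with an elevated refcount, so the caller must unlock and release it:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */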
/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (e.g. a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return &folio->page;
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (e.g.) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return folio->index == index;
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
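/*
 * Sketch: a writeback-style scan using the tagged lookup above.  These
 * helpers advance *index past the last page returned, so calling in a
 * loop walks the whole range.  Each returned page carries a reference;
 * write_out_page() is a hypothetical helper:
 *
 *	struct page *pages[16];
 *	pgoff_t index = 0;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
 *					ARRAY_SIZE(pages), pages))) {
 *		for (i = 0; i < nr; i++) {
 *			write_out_page(pages[i]);
 *			put_page(pages[i]);
 *		}
 *	}
 */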
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}
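/*
 * Sketch: read_cache_page() and read_mapping_page() return an ERR_PTR()
 * on failure, never NULL, so callers check with IS_ERR() (illustrative
 * only; the data argument is passed through to the filler):
 *
 *	page = read_mapping_page(mapping, index, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */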
/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
				unsigned int flags);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	return folio_lock_killable(page_folio(page));
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
				      unsigned int flags)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);
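/*
 * Sketch: the usual folio locking pattern in a killable context.  This
 * mirrors lock_page_killable() above; a non-zero return means a fatal
 * signal arrived while waiting (illustrative caller code):
 *
 *	ret = folio_lock_killable(folio);
 *	if (ret)
 *		return ret;
 *	...
 *	folio_unlock(folio);
 */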
/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * i.e. with an elevated reference count, so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	return folio_wait_locked_killable(page_folio(page));
}

int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	__folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
			   struct bdi_writeback *wb);
static inline void account_page_cleaned(struct page *page,
		struct address_space *mapping, struct bdi_writeback *wb)
{
	return folio_account_cleaned(page_folio(page), mapping, wb);
}
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
static inline void cancel_dirty_page(struct page *page)
{
	folio_cancel_dirty(page_folio(page));
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
	return folio_write_one(page_folio(page));
}

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
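/*
 * Sketch: a writepage-style path claims a dirty folio for I/O before
 * writing it back, and signals completion afterwards.  Illustrative only;
 * submit_my_io() is hypothetical, and folio_start_writeback() is assumed
 * from linux/page-flags.h:
 *
 *	if (folio_clear_dirty_for_io(folio)) {
 *		folio_start_writeback(folio);
 *		submit_my_io(folio);
 *		...
 *		folio_end_writeback(folio);
 *	}
 */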
/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct page *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);
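/*
 * Sketch: callers of the fault_in_*() helpers treat a non-zero return as
 * "not all bytes could be faulted in".  A write path that is about to
 * copy from a user buffer might pre-fault it like this (illustrative;
 * ubuf and len are hypothetical):
 *
 *	if (fault_in_readable(ubuf, len))
 *		return -EFAULT;
 */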
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct page *page, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, page, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}
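/*
 * Sketch: the loop shape that the struct readahead_control documentation
 * above describes for a filesystem's ->readahead method.  Illustrative
 * only; my_fs_read_page() is a hypothetical I/O submission helper that
 * would put the page's reference and unlock it on completion:
 *
 *	static void my_fs_readahead(struct readahead_control *ractl)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(ractl)))
 *			my_fs_read_page(ractl->file, page);
 *	}
 */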
/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index.  This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}
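/*
 * Sketch: the batched variant fills a caller-provided array, whose size
 * bounds each batch via ARRAY_SIZE().  Illustrative only; my_fs_read_page()
 * is the same hypothetical helper as above:
 *
 *	struct page *pages[8];
 *	unsigned int i, nr;
 *
 *	while ((nr = readahead_page_batch(ractl, pages))) {
 *		for (i = 0; i < nr; i++)
 *			my_fs_read_page(ractl->file, pages[i]);
 *	}
 */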
/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Return: the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */