/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO = 0,		/* IO error on async write */
	AS_ENOSPC = 1,		/* ENOSPC on async write */
	AS_MM_ALL_LOCKS = 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE = 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING = 4,		/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
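
/*
 * Example (illustrative sketch, not part of this header): a filesystem's
 * writeback completion path might record a failed write like this, so a
 * later fsync(2) sees the error.  The myfs_end_writeback() name and its
 * argument list are hypothetical.
 *
 *	static void myfs_end_writeback(struct address_space *mapping, int err)
 *	{
 *		if (err)
 *			mapping_set_error(mapping, err);
 *	}
 */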

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
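
/*
 * Example (hypothetical, for illustration only): a filesystem that can
 * handle large folios would typically opt in from the routine that sets
 * up a freshly allocated inode.  The myfs_set_inode_ops() name and the
 * myfs_aops operations table are assumptions, not part of this header.
 *
 *	static void myfs_set_inode_ops(struct inode *inode)
 *	{
 *		inode->i_mapping->a_ops = &myfs_aops;
 *		mapping_set_large_folios(inode->i_mapping);
 *	}
 */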

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored. This is different
 * from the mapping returned by folio_mapping(). The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (unlikely(folio_test_swapcache(folio)))
		return NULL;
	return folio_mapping(folio);
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

static inline bool page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return folio_ref_try_add_rcu((struct folio *)page, count);
}

static inline bool page_cache_get_speculative(struct page *page)
{
	return page_cache_add_speculative(page, 1);
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data. The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}
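
/*
 * Example (minimal sketch; the myfs_head structure and the function names
 * are hypothetical): folio_attach_private() takes a reference on the folio,
 * so the data must be detached again before the folio can be freed.
 *
 *	static void myfs_init_folio(struct folio *folio, struct myfs_head *head)
 *	{
 *		folio_attach_private(folio, head);
 *	}
 *
 *	static struct myfs_head *myfs_release_folio(struct folio *folio)
 *	{
 *		return folio_detach_private(folio);
 *	}
 */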

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040
#define FGP_HEAD		0x00000080
#define FGP_ENTRY		0x00000100
#define FGP_STABLE		0x00000200

struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}
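
/*
 * Example (illustrative only; myfs_folio_is_cached() is hypothetical):
 * look up a folio and drop the reference that filemap_get_folio() took.
 *
 *	static bool myfs_folio_is_cached(struct address_space *mapping,
 *					 pgoff_t index)
 *	{
 *		struct folio *folio = filemap_get_folio(mapping, index);
 *
 *		if (!folio)
 *			return false;
 *		folio_put(folio);
 *		return true;
 *	}
 */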

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
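
/*
 * Example (a sketch, assuming the caller may sleep; myfs_touch_block() is
 * hypothetical): get a locked page, creating it if necessary, then unlock
 * and release it when done.
 *
 *	static int myfs_touch_block(struct address_space *mapping, pgoff_t index)
 *	{
 *		struct page *page = find_or_create_page(mapping, index,
 *						mapping_gfp_mask(mapping));
 *
 *		if (!page)
 *			return -ENOMEM;
 *		unlock_page(page);
 *		put_page(page);
 *		return 0;
 *	}
 */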

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (PageHuge(head))
		return head->index == index;
	return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to. If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return &folio->page;
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
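
/*
 * Example (hypothetical helper): after finding the folio that backs a file
 * index, translate the index into the precise page within that folio.  The
 * reference taken by filemap_get_folio() is handed back with the page.
 *
 *	static struct page *myfs_fault_page(struct address_space *mapping,
 *					    pgoff_t index)
 *	{
 *		struct folio *folio = filemap_get_folio(mapping, index);
 *
 *		if (!folio)
 *			return NULL;
 *		return folio_file_page(folio, index);
 *	}
 */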

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return folio->index == index;
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
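
/*
 * Example (minimal sketch; myfs_read_index_page() is hypothetical):
 * read_mapping_page() returns an uptodate page with an elevated refcount,
 * or an ERR_PTR() on failure.
 *
 *	static int myfs_read_index_page(struct address_space *mapping,
 *					pgoff_t index)
 *	{
 *		struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *		if (IS_ERR(page))
 *			return PTR_ERR(page);
 *		put_page(page);
 *		return 0;
 *	}
 */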

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
				unsigned int flags);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}
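
/*
 * Example (a sketch of the usual pattern; myfs_lock_and_check() and the
 * -ESTALE choice are hypothetical): take the folio lock, recheck that the
 * folio is still attached to the expected mapping because it may have been
 * truncated while we slept, do the work, then unlock.
 *
 *	static int myfs_lock_and_check(struct folio *folio,
 *				       struct address_space *mapping)
 *	{
 *		int err = folio_lock_killable(folio);
 *
 *		if (err)
 *			return err;
 *		if (folio->mapping != mapping) {
 *			folio_unlock(folio);
 *			return -ESTALE;
 *		}
 *		folio_unlock(folio);
 *		return 0;
 *	}
 */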

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	return folio_lock_killable(page_folio(page));
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
				      unsigned int flags)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased "page->count" so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	return folio_wait_locked_killable(page_folio(page));
}

int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	__folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
			   struct bdi_writeback *wb);
static inline void account_page_cleaned(struct page *page,
		struct address_space *mapping, struct bdi_writeback *wb)
{
	return folio_account_cleaned(page_folio(page), mapping, wb);
}
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
static inline void cancel_dirty_page(struct page *page)
{
	folio_cancel_dirty(page_folio(page));
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
	return folio_write_one(page_folio(page));
}

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);
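
/*
 * Example (a sketch; myfs_create_folio() is hypothetical, and it assumes
 * that on success filemap_add_folio() leaves the folio locked and on the
 * LRU): allocate a new folio and insert it into the page cache.
 *
 *	static struct folio *myfs_create_folio(struct address_space *mapping,
 *					       pgoff_t index, gfp_t gfp)
 *	{
 *		struct folio *folio = filemap_alloc_folio(gfp, 0);
 *		int err;
 *
 *		if (!folio)
 *			return ERR_PTR(-ENOMEM);
 *		err = filemap_add_folio(mapping, folio, index, gfp);
 *		if (err) {
 *			folio_put(folio);
 *			return ERR_PTR(err);
 *		}
 *		return folio;
 *	}
 */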

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state. May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct page *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read. The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct page *page, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, page, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}
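
/*
 * Example (a sketch of a trivial ->readahead implementation; the
 * myfs_start_read() helper is hypothetical): consume the request one page
 * at a time, as described for struct readahead_control above.  Each page
 * must eventually be unlocked, and its reference dropped, by the I/O
 * completion path.
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(ractl)) != NULL)
 *			myfs_start_read(page);
 *	}
 */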

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked. The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index. This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}
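
/*
 * Example (illustrative sketch of a ->page_mkwrite handler; the function
 * name is hypothetical and error handling beyond truncation is omitted):
 * lock the folio, bail out if it was truncated, otherwise dirty it and
 * wait for writeback stability before returning with the folio locked.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *
 *		folio_lock(folio);
 *		if (folio_mkwrite_check_truncate(folio, inode) < 0) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		folio_mark_dirty(folio);
 *		folio_wait_stable(folio);
 *		return VM_FAULT_LOCKED;
 *	}
 */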

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */