/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}
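/*
 * Example (illustrative sketch, not part of this header): code that is
 * about to bypass the page cache for a byte range, as direct I/O does,
 * typically flushes the range and then invalidates it.  The function and
 * the "pos"/"len" parameters below are hypothetical.
 *
 *	static int example_flush_range(struct inode *inode, loff_t pos, size_t len)
 *	{
 *		struct address_space *mapping = inode->i_mapping;
 *		loff_t end = pos + len - 1;
 *		int err;
 *
 *		err = filemap_write_and_wait_range(mapping, pos, end);
 *		if (err)
 *			return err;
 *		return invalidate_inode_pages2_range(mapping,
 *				pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
 *	}
 */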
/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past.  This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

/*
 * Flush file data before changing attributes.  Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state.  That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages.  This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache.  Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes.  The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}
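/*
 * Example (illustrative sketch, not part of this header): errseq-based
 * error reporting pairs a sample taken before writeback with a check
 * afterwards, so only errors that occurred in between are reported.
 * The "since" variable below is hypothetical.
 *
 *	errseq_t since = filemap_sample_wb_err(mapping);
 *
 *	filemap_fdatawrite(mapping);
 *	filemap_fdatawait_keep_errors(mapping);
 *	return filemap_check_wb_err(mapping, since);
 */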
/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO = 0,		/* IO error on async write */
	AS_ENOSPC = 1,		/* ENOSPC on async write */
	AS_MM_ALL_LOCKS = 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE = 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING = 4, 	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
	AS_STABLE_WRITES,	/* must wait for writeback before modifying
				   folio contents */
	AS_UNMOVABLE,		/* The mapping cannot be moved, ever */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
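/*
 * Example (illustrative sketch, not part of this header): a natural place
 * to record a writeback error is the filesystem's write-completion path.
 * The "example_write_end_io" handler and its bio usage are hypothetical.
 *
 *	static void example_write_end_io(struct bio *bio)
 *	{
 *		struct folio *folio = bio_first_folio_all(bio);
 *
 *		if (bio->bi_status)
 *			mapping_set_error(folio->mapping,
 *					  blk_status_to_errno(bio->bi_status));
 *		folio_end_writeback(folio);
 *		bio_put(bio);
 *	}
 */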
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
	return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
	set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
	clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_unmovable(struct address_space *mapping)
{
	/*
	 * It's expected unmovable mappings are also unevictable.  Compaction's
	 * migrate scanner (isolate_migratepages_block()) relies on this to
	 * reduce page locking.
	 */
	set_bit(AS_UNEVICTABLE, &mapping->flags);
	set_bit(AS_UNMOVABLE, &mapping->flags);
}

static inline bool mapping_unmovable(struct address_space *mapping)
{
	return test_bit(AS_UNMOVABLE, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
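/*
 * Example (illustrative sketch, not part of this header): constraining a
 * caller-supplied gfp mask by the mapping's own allocation policy before
 * allocating a pagecache folio.  The "gfp" variable below is hypothetical.
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct folio *folio = filemap_alloc_folio(gfp, 0);
 */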
/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER	8
#endif

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/*
 * Large folio support currently depends on THP.  These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	/* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
	VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
			"Anonymous mapping always supports large folio");

	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(struct address_space *mapping)
{
	if (mapping_large_folio_support(mapping))
		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
	return PAGE_SIZE;
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored.  This is different
 * from the mapping returned by folio_mapping().  The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}
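/*
 * Example (illustrative sketch, not part of this header): a filesystem
 * opting in to large folios while initialising a new inode, before the
 * mapping is visible to other users.  "example_setup_inode" and the gfp
 * choice are hypothetical.
 *
 *	static void example_setup_inode(struct inode *inode)
 *	{
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 *		mapping_set_large_folios(inode->i_mapping);
 *	}
 */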
/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Anonymous folios return NULL, even if they're in
 * the swap cache.  Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code.  If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return NULL;

	return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}
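/*
 * Example (illustrative sketch, not part of this header): a filesystem
 * hanging its own per-folio state off folio->private.  The structure and
 * the release path shown are hypothetical.
 *
 *	struct example_fs_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *	if (!state)
 *		return -ENOMEM;
 *	folio_attach_private(folio, state);
 *
 * and later, typically from ->release_folio() or ->invalidate_folio():
 *
 *	kfree(folio_detach_private(folio));
 */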
static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)				\
	alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio().  For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list.  The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache.  If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED		((__force fgf_t)0x00000001)
#define FGP_LOCK		((__force fgf_t)0x00000002)
#define FGP_CREAT		((__force fgf_t)0x00000004)
#define FGP_WRITE		((__force fgf_t)0x00000008)
#define FGP_NOFS		((__force fgf_t)0x00000010)
#define FGP_NOWAIT		((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
#define FGP_STABLE		((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created.  If there is already a folio at
 * the index, it will be returned, no matter what its size.  If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
	unsigned int shift = ilog2(size);

	if (shift <= PAGE_SHIFT)
		return 0;
	return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}
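/*
 * Example (illustrative sketch, not part of this header): a write path can
 * combine FGP_WRITEBEGIN with fgf_set_order() to hint that a folio large
 * enough for the whole write would be preferred.  "pos" and "len" are
 * hypothetical.
 *
 *	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);
 *	struct folio *folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
 *			fgp, mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */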
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index.  If no folio is found,
 * a new folio is created.  The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio.  ERR_PTR(-ENOMEM) if no folio is found
 * and failed to create a folio.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
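/*
 * Example (illustrative sketch, not part of this header): callers must
 * treat the return value as an ERR_PTR rather than checking for NULL, and
 * must drop the reference when done.  "index" is hypothetical.
 *
 *	struct folio *folio = filemap_get_folio(mapping, index);
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	// ... read or modify the folio ...
 *	folio_put(folio);
 */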
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, fgf_t fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

extern pgoff_t __folio_swap_cache_index(struct folio *folio);

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the folio is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return __folio_swap_cache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}
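/*
 * Example (illustrative sketch, not part of this header): walking a file
 * range folio by folio, advancing by the size of each folio rather than
 * one page at a time.  "first" and "last" are hypothetical.
 *
 *	pgoff_t index = first;
 *
 *	while (index <= last) {
 *		struct folio *folio = filemap_lock_folio(mapping, index);
 *
 *		if (IS_ERR(folio))
 *			break;
 *		// ... operate on the folio ...
 *		index = folio_next_index(folio);
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 */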
/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}
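/*
 * Example (illustrative sketch, not part of this header): batched lookup
 * over a range using a folio_batch.  "first" and "last" are hypothetical.
 *
 *	struct folio_batch fbatch;
 *	pgoff_t start = first;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios(mapping, &start, last, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 *			struct folio *folio = fbatch.folios[i];
 *			// ... operate on the folio ...
 *		}
 *		folio_batch_release(&fbatch);
 *		cond_resched();
 *	}
 */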
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	return folio->index;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline bool trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should.  It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap.  It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary).  Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep.  If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space.  If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}
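/*
 * Example (illustrative sketch, not part of this header): batch processing
 * such as background writeback often skips folios it cannot lock
 * immediately instead of sleeping on each one.  The "wbc" usage below is
 * hypothetical.
 *
 *	if (!folio_trylock(folio)) {
 *		if (wbc->sync_mode == WB_SYNC_NONE)
 *			continue;	// skip; revisit on a later pass
 *		folio_lock(folio);	// data integrity: must wait
 *	}
 */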
/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep.  Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
					     struct vm_fault *vmf)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_or_retry(folio, vmf);
	return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}
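/*
 * Example (illustrative sketch, not part of this header): in a syscall
 * path it is usually preferable to take the lock killably and propagate
 * the error rather than sleeping uninterruptibly.
 *
 *	int err = folio_lock_killable(folio);
 *
 *	if (err) {
 *		folio_put(folio);
 *		return err;		// -EINTR: fatal signal pending
 *	}
 */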
static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);
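/*
 * Example (illustrative sketch, not part of this header): the classic
 * fault-in retry loop around a copy that must not fault while locks are
 * held.  The fault_in_* helpers return the number of bytes NOT faulted in.
 * "buf", "dst" and "len" are hypothetical.
 *
 *	while (len) {
 *		size_t copied;
 *
 *		if (fault_in_readable(buf, len) == len)
 *			return -EFAULT;	// nothing could be faulted in
 *		// ... take locks, then copy with page faults disabled ...
 *		pagefault_disable();
 *		copied = len - copy_from_user(dst, buf, len);
 *		pagefault_enable();
 *		// ... drop locks; advance by "copied", retry if short ...
 *	}
 */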
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.  Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
	bool _workingset;
	unsigned long _pflags;
};
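/*
 * Example (illustrative sketch, not part of this header): the shape of a
 * simple ->readahead implementation using the folio-based accessor.
 * "example_read_folio_async" is a hypothetical helper that unlocks the
 * folio when its I/O completes.
 *
 *	static void example_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)))
 *			example_read_folio_async(folio);
 *	}
 */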
#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, folio, req_count);
}
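/*
 * Example (illustrative sketch, not part of this header): a read path that
 * kicks off synchronous readahead on a cache miss, or async readahead when
 * it hits a folio carrying the readahead marker.  "index" and "nr" are
 * hypothetical.
 *
 *	struct folio *folio = filemap_get_folio(mapping, index);
 *
 *	if (IS_ERR(folio)) {
 *		page_cache_sync_readahead(mapping, ra, file, index, nr);
 *		folio = filemap_get_folio(mapping, index);
 *	} else if (folio_test_readahead(folio)) {
 *		page_cache_async_readahead(mapping, ra, file, folio, index, nr);
 *	}
 */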
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}
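/*
 * Example (illustrative sketch, not part of this header): a block-based
 * ->readahead can size a single I/O from the request geometry before
 * consuming folios.  How the I/O is actually built and submitted is left
 * out and would be filesystem-specific.
 *
 *	loff_t pos = readahead_pos(ractl);
 *	size_t len = readahead_length(ractl);
 *
 *	// ... build and submit an I/O covering [pos, pos + len) ...
 */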
/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Return: the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
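/*
 * Example (illustrative sketch, not part of this header): a ->page_mkwrite
 * handler re-checks for truncation after locking the folio.
 * "example_page_mkwrite" is hypothetical.
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *
 *		folio_lock(folio);
 *		if (folio_mkwrite_check_truncate(folio, inode) < 0) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		folio_mark_dirty(folio);
 *		return VM_FAULT_LOCKED;
 *	}
 */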
/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */