/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}
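
/*
 * Illustrative sketch (not part of the upstream header): the common
 * "flush and wait on a byte range" pattern built from the declarations
 * above. The helper name example_flush_range() is hypothetical.
 *
 *	static int example_flush_range(struct address_space *mapping,
 *				       loff_t pos, size_t len)
 *	{
 *		return filemap_write_and_wait_range(mapping, pos,
 *						    pos + len - 1);
 *	}
 */
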
/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}
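
/*
 * Illustrative sketch (not part of the upstream header): fsync-style error
 * reporting with the errseq_t helpers above. A sample is taken first, then
 * any error recorded after that point is returned once writeback completes.
 * example_sync() is a hypothetical helper.
 *
 *	static int example_sync(struct address_space *mapping)
 *	{
 *		errseq_t since = filemap_sample_wb_err(mapping);
 *		int err = filemap_write_and_wait(mapping);
 *
 *		if (err)
 *			return err;
 *		return filemap_check_wb_err(mapping, since);
 *	}
 */
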
/*
 * Flush file data before changing attributes. Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
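
/*
 * Illustrative sketch (not part of the upstream header): a filesystem's
 * write completion path recording a failure so a later fsync() sees it.
 * example_write_end_io() and its error plumbing are hypothetical.
 *
 *	static void example_write_end_io(struct address_space *mapping, int err)
 *	{
 *		if (err)
 *			mapping_set_error(mapping, err);
 *		...
 *	}
 */
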
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
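
/*
 * Illustrative sketch (not part of the upstream header): opting in to large
 * folios from an inode constructor, as the comment above describes.
 * example_init_inode() is a hypothetical helper.
 *
 *	static void example_init_inode(struct inode *inode)
 *	{
 *		...
 *		mapping_set_large_folios(inode->i_mapping);
 *	}
 */
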
/*
 * Large folio support currently depends on THP. These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored. This is different
 * from the mapping returned by folio_mapping(). The only reason to
 * use it is if, like NFS, you return 0 from ->swap_activate.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (unlikely(folio_test_swapcache(folio)))
		return NULL;
	return folio_mapping(folio);
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data. The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}
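
/*
 * Illustrative sketch (not part of the upstream header): typical lifecycle of
 * per-folio private data using the helpers above. struct example_state is
 * hypothetical; the attach takes a folio reference which detach releases.
 *
 *	struct example_state *state = kzalloc(sizeof(*state), GFP_NOFS);
 *
 *	folio_attach_private(folio, state);
 *	...
 *	state = folio_detach_private(folio);
 *	kfree(state);
 */
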
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040
#define FGP_STABLE		0x00000080

#define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);
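
/*
 * Illustrative sketch (not part of the upstream header): a write_begin-style
 * lookup using the FGP flags above. FGP_WRITEBEGIN asks for a folio that is
 * locked, created if necessary, and stable for writing.
 *
 *	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
 *				    FGP_WRITEBEGIN, mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */
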
/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio was found
 * and a new folio could not be created.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to. If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}
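
/*
 * Illustrative sketch (not part of the upstream header): walking the cached
 * folios of a file range one folio at a time, advancing by the folio size
 * with folio_next_index(). The loop simply stops at the first hole.
 *
 *	while (index < end) {
 *		struct folio *folio = filemap_get_folio(mapping, index);
 *
 *		if (IS_ERR(folio))
 *			break;
 *		... examine the folio ...
 *		index = folio_next_index(folio);
 *		folio_put(folio);
 *	}
 */
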
/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return &folio->page;
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return folio->index == index;
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
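
/*
 * Illustrative sketch (not part of the upstream header): batched iteration
 * over a range with filemap_get_folios() and a folio_batch (the batch
 * helpers live in <linux/pagevec.h>).
 *
 *	struct folio_batch fbatch;
 *	pgoff_t start = first;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios(mapping, &start, last, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			... process fbatch.folios[i] ...
 *		folio_batch_release(&fbatch);
 *	}
 */
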
struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	if (unlikely(folio_test_hugetlb(folio)))
		return hugetlb_basepage_index(&folio->page);
	return folio->index;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
				unsigned int flags);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order). Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}
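
/*
 * Illustrative sketch (not part of the upstream header): the common
 * "try the lock, fall back to sleeping" pattern. A non-blocking caller
 * bails out with -EAGAIN instead of waiting; `nonblock' is hypothetical.
 *
 *	if (!folio_trylock(folio)) {
 *		if (nonblock)
 *			return -EAGAIN;
 *		folio_lock(folio);
 *	}
 */
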
/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should. It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap. It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary). Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep. If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space. If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}
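
/*
 * Illustrative sketch (not part of the upstream header): the usual read-side
 * use of the folio lock, holding it while the folio is brought uptodate as
 * described above. The I/O itself is elided.
 *
 *	folio_lock(folio);
 *	if (!folio_test_uptodate(folio)) {
 *		... read the data in, then folio_mark_uptodate(folio) ...
 *	}
 *	folio_unlock(folio);
 */
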
/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep. Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline bool folio_lock_or_retry(struct folio *folio,
		struct mm_struct *mm, unsigned int flags)
{
	might_sleep();
	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	return folio_wait_locked_killable(page_folio(page));
}

void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	__folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __set_page_dirty_nobuffers(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
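
/*
 * Illustrative sketch (not part of the upstream header): a simplified
 * writeback step using the helpers above. Waiting for any previous I/O and
 * clearing the dirty bit are shown; starting the new I/O (and the matching
 * folio_end_writeback() on completion) is elided.
 *
 *	folio_lock(folio);
 *	folio_wait_writeback(folio);
 *	if (folio_clear_dirty_for_io(folio)) {
 *		... start write I/O on the folio ...
 *	}
 *	folio_unlock(folio);
 */
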
#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback. Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
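
/*
 * Illustrative sketch (not part of the upstream header): the IOCB_NOWAIT
 * check described above, as a direct I/O path might perform it before
 * falling back to filemap_write_and_wait_range().
 *
 *	if (iocb->ki_flags & IOCB_NOWAIT) {
 *		if (filemap_range_needs_writeback(mapping, pos, end))
 *			return -EAGAIN;
 *	} else {
 *		ret = filemap_write_and_wait_range(mapping, pos, end);
 *		if (ret)
 *			return ret;
 *	}
 */
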
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state. May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
	bool _workingset;
	unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read. The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}
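
/*
 * Illustrative sketch (not part of the upstream header): kicking off
 * synchronous readahead on a cache miss, in the style described above.
 * `filp', `index' and `req_count' are hypothetical locals.
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, req_count);
 *		page = find_get_page(mapping, index);
 *	}
 */
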
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, folio, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked. The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
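
/*
 * Illustrative sketch (not part of the upstream header): the shape of a
 * ->readahead implementation using readahead_folio(). Each folio is returned
 * locked; the filesystem unlocks it when the read I/O completes.
 * example_readahead() is a hypothetical method.
 *
 *	static void example_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)) != NULL) {
 *			... submit read I/O for folio_size(folio) bytes at
 *			    folio_pos(folio); call folio_unlock(folio) from
 *			    the I/O completion handler ...
 *		}
 *	}
 */
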
/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}
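
/*
 * Illustrative sketch (not part of the upstream header): using
 * folio_mkwrite_check_truncate() from a ->page_mkwrite handler to find how
 * much of the folio is still within EOF. example_page_mkwrite() is
 * hypothetical and error handling is abbreviated.
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		ssize_t len;
 *
 *		folio_lock(folio);
 *		len = folio_mkwrite_check_truncate(folio, inode);
 *		if (len < 0) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		... prepare the first len bytes for writing ...
 *		return VM_FAULT_LOCKED;
 *	}
 */
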
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */