// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/buffer.c
 *
 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale. If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
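 *
 * Callers normally use the wait_on_buffer() wrapper and then re-check the
 * buffer state themselves.  A minimal sketch of the usual pattern (the same
 * one used by the fsync helpers later in this file):
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;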
120 */ 121 void __wait_on_buffer(struct buffer_head * bh) 122 { 123 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); 124 } 125 EXPORT_SYMBOL(__wait_on_buffer); 126 127 static void buffer_io_error(struct buffer_head *bh, char *msg) 128 { 129 if (!test_bit(BH_Quiet, &bh->b_state)) 130 printk_ratelimited(KERN_ERR 131 "Buffer I/O error on dev %pg, logical block %llu%s\n", 132 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); 133 } 134 135 /* 136 * End-of-IO handler helper function which does not touch the bh after 137 * unlocking it. 138 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but 139 * a race there is benign: unlock_buffer() only use the bh's address for 140 * hashing after unlocking the buffer, so it doesn't actually touch the bh 141 * itself. 142 */ 143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) 144 { 145 if (uptodate) { 146 set_buffer_uptodate(bh); 147 } else { 148 /* This happens, due to failed read-ahead attempts. */ 149 clear_buffer_uptodate(bh); 150 } 151 unlock_buffer(bh); 152 } 153 154 /* 155 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 156 * unlock the buffer. 157 */ 158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) 159 { 160 __end_buffer_read_notouch(bh, uptodate); 161 put_bh(bh); 162 } 163 EXPORT_SYMBOL(end_buffer_read_sync); 164 165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 166 { 167 if (uptodate) { 168 set_buffer_uptodate(bh); 169 } else { 170 buffer_io_error(bh, ", lost sync page write"); 171 mark_buffer_write_io_error(bh); 172 clear_buffer_uptodate(bh); 173 } 174 unlock_buffer(bh); 175 put_bh(bh); 176 } 177 EXPORT_SYMBOL(end_buffer_write_sync); 178 179 static struct buffer_head * 180 __find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic) 181 { 182 struct address_space *bd_mapping = bdev->bd_mapping; 183 const int blkbits = bd_mapping->host->i_blkbits; 184 struct buffer_head *ret = NULL; 185 pgoff_t index; 186 struct buffer_head *bh; 187 struct buffer_head *head; 188 struct folio *folio; 189 int all_mapped = 1; 190 static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); 191 192 index = ((loff_t)block << blkbits) / PAGE_SIZE; 193 folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0); 194 if (IS_ERR(folio)) 195 goto out; 196 197 /* 198 * Folio lock protects the buffers. Callers that cannot block 199 * will fallback to serializing vs try_to_free_buffers() via 200 * the i_private_lock. 201 */ 202 if (atomic) 203 spin_lock(&bd_mapping->i_private_lock); 204 else 205 folio_lock(folio); 206 207 head = folio_buffers(folio); 208 if (!head) 209 goto out_unlock; 210 bh = head; 211 do { 212 if (!buffer_mapped(bh)) 213 all_mapped = 0; 214 else if (bh->b_blocknr == block) { 215 ret = bh; 216 get_bh(bh); 217 goto out_unlock; 218 } 219 bh = bh->b_this_page; 220 } while (bh != head); 221 222 /* we might be here because some of the buffers on this page are 223 * not mapped. This is due to various races between 224 * file io on the block device and getblk. It gets dealt with 225 * elsewhere, don't buffer_error if we had some unmapped buffers 226 */ 227 ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE); 228 if (all_mapped && __ratelimit(&last_warned)) { 229 printk("__find_get_block_slow() failed. 
block=%llu, " 230 "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, " 231 "device %pg blocksize: %d\n", 232 (unsigned long long)block, 233 (unsigned long long)bh->b_blocknr, 234 bh->b_state, bh->b_size, bdev, 235 1 << blkbits); 236 } 237 out_unlock: 238 if (atomic) 239 spin_unlock(&bd_mapping->i_private_lock); 240 else 241 folio_unlock(folio); 242 folio_put(folio); 243 out: 244 return ret; 245 } 246 247 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) 248 { 249 unsigned long flags; 250 struct buffer_head *first; 251 struct buffer_head *tmp; 252 struct folio *folio; 253 int folio_uptodate = 1; 254 255 BUG_ON(!buffer_async_read(bh)); 256 257 folio = bh->b_folio; 258 if (uptodate) { 259 set_buffer_uptodate(bh); 260 } else { 261 clear_buffer_uptodate(bh); 262 buffer_io_error(bh, ", async page read"); 263 } 264 265 /* 266 * Be _very_ careful from here on. Bad things can happen if 267 * two buffer heads end IO at almost the same time and both 268 * decide that the page is now completely done. 269 */ 270 first = folio_buffers(folio); 271 spin_lock_irqsave(&first->b_uptodate_lock, flags); 272 clear_buffer_async_read(bh); 273 unlock_buffer(bh); 274 tmp = bh; 275 do { 276 if (!buffer_uptodate(tmp)) 277 folio_uptodate = 0; 278 if (buffer_async_read(tmp)) { 279 BUG_ON(!buffer_locked(tmp)); 280 goto still_busy; 281 } 282 tmp = tmp->b_this_page; 283 } while (tmp != bh); 284 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 285 286 folio_end_read(folio, folio_uptodate); 287 return; 288 289 still_busy: 290 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 291 return; 292 } 293 294 struct postprocess_bh_ctx { 295 struct work_struct work; 296 struct buffer_head *bh; 297 }; 298 299 static void verify_bh(struct work_struct *work) 300 { 301 struct postprocess_bh_ctx *ctx = 302 container_of(work, struct postprocess_bh_ctx, work); 303 struct buffer_head *bh = ctx->bh; 304 bool valid; 305 306 valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh)); 307 end_buffer_async_read(bh, valid); 308 kfree(ctx); 309 } 310 311 static bool need_fsverity(struct buffer_head *bh) 312 { 313 struct folio *folio = bh->b_folio; 314 struct inode *inode = folio->mapping->host; 315 316 return fsverity_active(inode) && 317 /* needed by ext4 */ 318 folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 319 } 320 321 static void decrypt_bh(struct work_struct *work) 322 { 323 struct postprocess_bh_ctx *ctx = 324 container_of(work, struct postprocess_bh_ctx, work); 325 struct buffer_head *bh = ctx->bh; 326 int err; 327 328 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size, 329 bh_offset(bh)); 330 if (err == 0 && need_fsverity(bh)) { 331 /* 332 * We use different work queues for decryption and for verity 333 * because verity may require reading metadata pages that need 334 * decryption, and we shouldn't recurse to the same workqueue. 335 */ 336 INIT_WORK(&ctx->work, verify_bh); 337 fsverity_enqueue_verify_work(&ctx->work); 338 return; 339 } 340 end_buffer_async_read(bh, err == 0); 341 kfree(ctx); 342 } 343 344 /* 345 * I/O completion handler for block_read_full_folio() - pages 346 * which come unlocked at the end of I/O. 347 */ 348 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) 349 { 350 struct inode *inode = bh->b_folio->mapping->host; 351 bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode); 352 bool verify = need_fsverity(bh); 353 354 /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. 
*/ 355 if (uptodate && (decrypt || verify)) { 356 struct postprocess_bh_ctx *ctx = 357 kmalloc(sizeof(*ctx), GFP_ATOMIC); 358 359 if (ctx) { 360 ctx->bh = bh; 361 if (decrypt) { 362 INIT_WORK(&ctx->work, decrypt_bh); 363 fscrypt_enqueue_decrypt_work(&ctx->work); 364 } else { 365 INIT_WORK(&ctx->work, verify_bh); 366 fsverity_enqueue_verify_work(&ctx->work); 367 } 368 return; 369 } 370 uptodate = 0; 371 } 372 end_buffer_async_read(bh, uptodate); 373 } 374 375 /* 376 * Completion handler for block_write_full_folio() - folios which are unlocked 377 * during I/O, and which have the writeback flag cleared upon I/O completion. 378 */ 379 static void end_buffer_async_write(struct buffer_head *bh, int uptodate) 380 { 381 unsigned long flags; 382 struct buffer_head *first; 383 struct buffer_head *tmp; 384 struct folio *folio; 385 386 BUG_ON(!buffer_async_write(bh)); 387 388 folio = bh->b_folio; 389 if (uptodate) { 390 set_buffer_uptodate(bh); 391 } else { 392 buffer_io_error(bh, ", lost async page write"); 393 mark_buffer_write_io_error(bh); 394 clear_buffer_uptodate(bh); 395 } 396 397 first = folio_buffers(folio); 398 spin_lock_irqsave(&first->b_uptodate_lock, flags); 399 400 clear_buffer_async_write(bh); 401 unlock_buffer(bh); 402 tmp = bh->b_this_page; 403 while (tmp != bh) { 404 if (buffer_async_write(tmp)) { 405 BUG_ON(!buffer_locked(tmp)); 406 goto still_busy; 407 } 408 tmp = tmp->b_this_page; 409 } 410 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 411 folio_end_writeback(folio); 412 return; 413 414 still_busy: 415 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 416 return; 417 } 418 419 /* 420 * If a page's buffers are under async readin (end_buffer_async_read 421 * completion) then there is a possibility that another thread of 422 * control could lock one of the buffers after it has completed 423 * but while some of the other buffers have not completed. This 424 * locked buffer would confuse end_buffer_async_read() into not unlocking 425 * the page. So the absence of BH_Async_Read tells end_buffer_async_read() 426 * that this buffer is not under async I/O. 427 * 428 * The page comes unlocked when it has no locked buffer_async buffers 429 * left. 430 * 431 * PageLocked prevents anyone starting new async I/O reads any of 432 * the buffers. 433 * 434 * PageWriteback is used to prevent simultaneous writeout of the same 435 * page. 436 * 437 * PageLocked prevents anyone from starting writeback of a page which is 438 * under read I/O (PageWriteback is only ever set against a locked page). 439 */ 440 static void mark_buffer_async_read(struct buffer_head *bh) 441 { 442 bh->b_end_io = end_buffer_async_read_io; 443 set_buffer_async_read(bh); 444 } 445 446 static void mark_buffer_async_write_endio(struct buffer_head *bh, 447 bh_end_io_t *handler) 448 { 449 bh->b_end_io = handler; 450 set_buffer_async_write(bh); 451 } 452 453 void mark_buffer_async_write(struct buffer_head *bh) 454 { 455 mark_buffer_async_write_endio(bh, end_buffer_async_write); 456 } 457 EXPORT_SYMBOL(mark_buffer_async_write); 458 459 460 /* 461 * fs/buffer.c contains helper functions for buffer-backed address space's 462 * fsync functions. A common requirement for buffer-based filesystems is 463 * that certain data from the backing blockdev needs to be written out for 464 * a successful fsync(). For example, ext2 indirect blocks need to be 465 * written back and waited upon before fsync() returns. 
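 *
 * As a rough illustration (a sketch, not taken from any one filesystem):
 * such metadata buffers are marked as they are dirtied with
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * and the fsync method, after writing the data pages, calls
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 *
 * to start and wait on I/O for exactly those associated buffers.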
466 * 467 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(), 468 * inode_has_buffers() and invalidate_inode_buffers() are provided for the 469 * management of a list of dependent buffers at ->i_mapping->i_private_list. 470 * 471 * Locking is a little subtle: try_to_free_buffers() will remove buffers 472 * from their controlling inode's queue when they are being freed. But 473 * try_to_free_buffers() will be operating against the *blockdev* mapping 474 * at the time, not against the S_ISREG file which depends on those buffers. 475 * So the locking for i_private_list is via the i_private_lock in the address_space 476 * which backs the buffers. Which is different from the address_space 477 * against which the buffers are listed. So for a particular address_space, 478 * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact, 479 * mapping->i_private_list will always be protected by the backing blockdev's 480 * ->i_private_lock. 481 * 482 * Which introduces a requirement: all buffers on an address_space's 483 * ->i_private_list must be from the same address_space: the blockdev's. 484 * 485 * address_spaces which do not place buffers at ->i_private_list via these 486 * utility functions are free to use i_private_lock and i_private_list for 487 * whatever they want. The only requirement is that list_empty(i_private_list) 488 * be true at clear_inode() time. 489 * 490 * FIXME: clear_inode should not call invalidate_inode_buffers(). The 491 * filesystems should do that. invalidate_inode_buffers() should just go 492 * BUG_ON(!list_empty). 493 * 494 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should 495 * take an address_space, not an inode. And it should be called 496 * mark_buffer_dirty_fsync() to clearly define why those buffers are being 497 * queued up. 498 * 499 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the 500 * list if it is already on a list. Because if the buffer is on a list, 501 * it *must* already be on the right one. If not, the filesystem is being 502 * silly. This will save a ton of locking. But first we have to ensure 503 * that buffers are taken *off* the old inode's list when they are freed 504 * (presumably in truncate). That requires careful auditing of all 505 * filesystems (do it inside bforget()). It could also be done by bringing 506 * b_inode back. 507 */ 508 509 /* 510 * The buffer's backing address_space's i_private_lock must be held 511 */ 512 static void __remove_assoc_queue(struct buffer_head *bh) 513 { 514 list_del_init(&bh->b_assoc_buffers); 515 WARN_ON(!bh->b_assoc_map); 516 bh->b_assoc_map = NULL; 517 } 518 519 int inode_has_buffers(struct inode *inode) 520 { 521 return !list_empty(&inode->i_data.i_private_list); 522 } 523 524 /* 525 * osync is designed to support O_SYNC io. It waits synchronously for 526 * all already-submitted IO to complete, but does not queue any new 527 * writes to the disk. 528 * 529 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer 530 * as you dirty the buffers, and then use osync_inode_buffers to wait for 531 * completion. Any other dirty buffers which are not yet queued for 532 * write will not be flushed to disk by the osync. 
533 */ 534 static int osync_buffers_list(spinlock_t *lock, struct list_head *list) 535 { 536 struct buffer_head *bh; 537 struct list_head *p; 538 int err = 0; 539 540 spin_lock(lock); 541 repeat: 542 list_for_each_prev(p, list) { 543 bh = BH_ENTRY(p); 544 if (buffer_locked(bh)) { 545 get_bh(bh); 546 spin_unlock(lock); 547 wait_on_buffer(bh); 548 if (!buffer_uptodate(bh)) 549 err = -EIO; 550 brelse(bh); 551 spin_lock(lock); 552 goto repeat; 553 } 554 } 555 spin_unlock(lock); 556 return err; 557 } 558 559 /** 560 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers 561 * @mapping: the mapping which wants those buffers written 562 * 563 * Starts I/O against the buffers at mapping->i_private_list, and waits upon 564 * that I/O. 565 * 566 * Basically, this is a convenience function for fsync(). 567 * @mapping is a file or directory which needs those buffers to be written for 568 * a successful fsync(). 569 */ 570 int sync_mapping_buffers(struct address_space *mapping) 571 { 572 struct address_space *buffer_mapping = mapping->i_private_data; 573 574 if (buffer_mapping == NULL || list_empty(&mapping->i_private_list)) 575 return 0; 576 577 return fsync_buffers_list(&buffer_mapping->i_private_lock, 578 &mapping->i_private_list); 579 } 580 EXPORT_SYMBOL(sync_mapping_buffers); 581 582 /** 583 * generic_buffers_fsync_noflush - generic buffer fsync implementation 584 * for simple filesystems with no inode lock 585 * 586 * @file: file to synchronize 587 * @start: start offset in bytes 588 * @end: end offset in bytes (inclusive) 589 * @datasync: only synchronize essential metadata if true 590 * 591 * This is a generic implementation of the fsync method for simple 592 * filesystems which track all non-inode metadata in the buffers list 593 * hanging off the address_space structure. 594 */ 595 int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end, 596 bool datasync) 597 { 598 struct inode *inode = file->f_mapping->host; 599 int err; 600 int ret; 601 602 err = file_write_and_wait_range(file, start, end); 603 if (err) 604 return err; 605 606 ret = sync_mapping_buffers(inode->i_mapping); 607 if (!(inode->i_state & I_DIRTY_ALL)) 608 goto out; 609 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 610 goto out; 611 612 err = sync_inode_metadata(inode, 1); 613 if (ret == 0) 614 ret = err; 615 616 out: 617 /* check and advance again to catch errors after syncing out buffers */ 618 err = file_check_and_advance_wb_err(file); 619 if (ret == 0) 620 ret = err; 621 return ret; 622 } 623 EXPORT_SYMBOL(generic_buffers_fsync_noflush); 624 625 /** 626 * generic_buffers_fsync - generic buffer fsync implementation 627 * for simple filesystems with no inode lock 628 * 629 * @file: file to synchronize 630 * @start: start offset in bytes 631 * @end: end offset in bytes (inclusive) 632 * @datasync: only synchronize essential metadata if true 633 * 634 * This is a generic implementation of the fsync method for simple 635 * filesystems which track all non-inode metadata in the buffers list 636 * hanging off the address_space structure. This also makes sure that 637 * a device cache flush operation is called at the end. 
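 *
 * A minimal fsync method for such a filesystem might simply wrap this
 * helper (illustrative sketch; "myfs" is a made-up name):
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}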
638 */ 639 int generic_buffers_fsync(struct file *file, loff_t start, loff_t end, 640 bool datasync) 641 { 642 struct inode *inode = file->f_mapping->host; 643 int ret; 644 645 ret = generic_buffers_fsync_noflush(file, start, end, datasync); 646 if (!ret) 647 ret = blkdev_issue_flush(inode->i_sb->s_bdev); 648 return ret; 649 } 650 EXPORT_SYMBOL(generic_buffers_fsync); 651 652 /* 653 * Called when we've recently written block `bblock', and it is known that 654 * `bblock' was for a buffer_boundary() buffer. This means that the block at 655 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's 656 * dirty, schedule it for IO. So that indirects merge nicely with their data. 657 */ 658 void write_boundary_block(struct block_device *bdev, 659 sector_t bblock, unsigned blocksize) 660 { 661 struct buffer_head *bh; 662 663 bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize); 664 if (bh) { 665 if (buffer_dirty(bh)) 666 write_dirty_buffer(bh, 0); 667 put_bh(bh); 668 } 669 } 670 671 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) 672 { 673 struct address_space *mapping = inode->i_mapping; 674 struct address_space *buffer_mapping = bh->b_folio->mapping; 675 676 mark_buffer_dirty(bh); 677 if (!mapping->i_private_data) { 678 mapping->i_private_data = buffer_mapping; 679 } else { 680 BUG_ON(mapping->i_private_data != buffer_mapping); 681 } 682 if (!bh->b_assoc_map) { 683 spin_lock(&buffer_mapping->i_private_lock); 684 list_move_tail(&bh->b_assoc_buffers, 685 &mapping->i_private_list); 686 bh->b_assoc_map = mapping; 687 spin_unlock(&buffer_mapping->i_private_lock); 688 } 689 } 690 EXPORT_SYMBOL(mark_buffer_dirty_inode); 691 692 /** 693 * block_dirty_folio - Mark a folio as dirty. 694 * @mapping: The address space containing this folio. 695 * @folio: The folio to mark dirty. 696 * 697 * Filesystems which use buffer_heads can use this function as their 698 * ->dirty_folio implementation. Some filesystems need to do a little 699 * work before calling this function. Filesystems which do not use 700 * buffer_heads should call filemap_dirty_folio() instead. 701 * 702 * If the folio has buffers, the uptodate buffers are set dirty, to 703 * preserve dirty-state coherency between the folio and the buffers. 704 * Buffers added to a dirty folio are created dirty. 705 * 706 * The buffers are dirtied before the folio is dirtied. There's a small 707 * race window in which writeback may see the folio cleanness but not the 708 * buffer dirtiness. That's fine. If this code were to set the folio 709 * dirty before the buffers, writeback could clear the folio dirty flag, 710 * see a bunch of clean buffers and we'd end up with dirty buffers/clean 711 * folio on the dirty folio list. 712 * 713 * We use i_private_lock to lock against try_to_free_buffers() while 714 * using the folio's buffer list. This also prevents clean buffers 715 * being added to the folio after it was set dirty. 716 * 717 * Context: May only be called from process context. Does not sleep. 718 * Caller must ensure that @folio cannot be truncated during this call, 719 * typically by holding the folio lock or having a page in the folio 720 * mapped and holding the page table lock. 721 * 722 * Return: True if the folio was dirtied; false if it was already dirtied. 
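 *
 * Filesystems using this helper typically wire it into their
 * address_space_operations together with block_invalidate_folio(), for
 * example (illustrative sketch; "myfs" is a made-up name and a real
 * filesystem would fill in its remaining methods):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *	};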
723 */ 724 bool block_dirty_folio(struct address_space *mapping, struct folio *folio) 725 { 726 struct buffer_head *head; 727 bool newly_dirty; 728 729 spin_lock(&mapping->i_private_lock); 730 head = folio_buffers(folio); 731 if (head) { 732 struct buffer_head *bh = head; 733 734 do { 735 set_buffer_dirty(bh); 736 bh = bh->b_this_page; 737 } while (bh != head); 738 } 739 /* 740 * Lock out page's memcg migration to keep PageDirty 741 * synchronized with per-memcg dirty page counters. 742 */ 743 newly_dirty = !folio_test_set_dirty(folio); 744 spin_unlock(&mapping->i_private_lock); 745 746 if (newly_dirty) 747 __folio_mark_dirty(folio, mapping, 1); 748 749 if (newly_dirty) 750 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 751 752 return newly_dirty; 753 } 754 EXPORT_SYMBOL(block_dirty_folio); 755 756 /* 757 * Write out and wait upon a list of buffers. 758 * 759 * We have conflicting pressures: we want to make sure that all 760 * initially dirty buffers get waited on, but that any subsequently 761 * dirtied buffers don't. After all, we don't want fsync to last 762 * forever if somebody is actively writing to the file. 763 * 764 * Do this in two main stages: first we copy dirty buffers to a 765 * temporary inode list, queueing the writes as we go. Then we clean 766 * up, waiting for those writes to complete. 767 * 768 * During this second stage, any subsequent updates to the file may end 769 * up refiling the buffer on the original inode's dirty list again, so 770 * there is a chance we will end up with a buffer queued for write but 771 * not yet completed on that list. So, as a final cleanup we go through 772 * the osync code to catch these locked, dirty buffers without requeuing 773 * any newly dirty buffers for write. 774 */ 775 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) 776 { 777 struct buffer_head *bh; 778 struct address_space *mapping; 779 int err = 0, err2; 780 struct blk_plug plug; 781 LIST_HEAD(tmp); 782 783 blk_start_plug(&plug); 784 785 spin_lock(lock); 786 while (!list_empty(list)) { 787 bh = BH_ENTRY(list->next); 788 mapping = bh->b_assoc_map; 789 __remove_assoc_queue(bh); 790 /* Avoid race with mark_buffer_dirty_inode() which does 791 * a lockless check and we rely on seeing the dirty bit */ 792 smp_mb(); 793 if (buffer_dirty(bh) || buffer_locked(bh)) { 794 list_add(&bh->b_assoc_buffers, &tmp); 795 bh->b_assoc_map = mapping; 796 if (buffer_dirty(bh)) { 797 get_bh(bh); 798 spin_unlock(lock); 799 /* 800 * Ensure any pending I/O completes so that 801 * write_dirty_buffer() actually writes the 802 * current contents - it is a noop if I/O is 803 * still in flight on potentially older 804 * contents. 805 */ 806 write_dirty_buffer(bh, REQ_SYNC); 807 808 /* 809 * Kick off IO for the previous mapping. Note 810 * that we will not run the very last mapping, 811 * wait_on_buffer() will do that for us 812 * through sync_buffer(). 
813 */ 814 brelse(bh); 815 spin_lock(lock); 816 } 817 } 818 } 819 820 spin_unlock(lock); 821 blk_finish_plug(&plug); 822 spin_lock(lock); 823 824 while (!list_empty(&tmp)) { 825 bh = BH_ENTRY(tmp.prev); 826 get_bh(bh); 827 mapping = bh->b_assoc_map; 828 __remove_assoc_queue(bh); 829 /* Avoid race with mark_buffer_dirty_inode() which does 830 * a lockless check and we rely on seeing the dirty bit */ 831 smp_mb(); 832 if (buffer_dirty(bh)) { 833 list_add(&bh->b_assoc_buffers, 834 &mapping->i_private_list); 835 bh->b_assoc_map = mapping; 836 } 837 spin_unlock(lock); 838 wait_on_buffer(bh); 839 if (!buffer_uptodate(bh)) 840 err = -EIO; 841 brelse(bh); 842 spin_lock(lock); 843 } 844 845 spin_unlock(lock); 846 err2 = osync_buffers_list(lock, list); 847 if (err) 848 return err; 849 else 850 return err2; 851 } 852 853 /* 854 * Invalidate any and all dirty buffers on a given inode. We are 855 * probably unmounting the fs, but that doesn't mean we have already 856 * done a sync(). Just drop the buffers from the inode list. 857 * 858 * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which 859 * assumes that all the buffers are against the blockdev. 860 */ 861 void invalidate_inode_buffers(struct inode *inode) 862 { 863 if (inode_has_buffers(inode)) { 864 struct address_space *mapping = &inode->i_data; 865 struct list_head *list = &mapping->i_private_list; 866 struct address_space *buffer_mapping = mapping->i_private_data; 867 868 spin_lock(&buffer_mapping->i_private_lock); 869 while (!list_empty(list)) 870 __remove_assoc_queue(BH_ENTRY(list->next)); 871 spin_unlock(&buffer_mapping->i_private_lock); 872 } 873 } 874 EXPORT_SYMBOL(invalidate_inode_buffers); 875 876 /* 877 * Remove any clean buffers from the inode's buffer list. This is called 878 * when we're trying to free the inode itself. Those buffers can pin it. 879 * 880 * Returns true if all buffers were removed. 881 */ 882 int remove_inode_buffers(struct inode *inode) 883 { 884 int ret = 1; 885 886 if (inode_has_buffers(inode)) { 887 struct address_space *mapping = &inode->i_data; 888 struct list_head *list = &mapping->i_private_list; 889 struct address_space *buffer_mapping = mapping->i_private_data; 890 891 spin_lock(&buffer_mapping->i_private_lock); 892 while (!list_empty(list)) { 893 struct buffer_head *bh = BH_ENTRY(list->next); 894 if (buffer_dirty(bh)) { 895 ret = 0; 896 break; 897 } 898 __remove_assoc_queue(bh); 899 } 900 spin_unlock(&buffer_mapping->i_private_lock); 901 } 902 return ret; 903 } 904 905 /* 906 * Create the appropriate buffers when given a folio for data area and 907 * the size of each buffer.. Use the bh->b_this_page linked list to 908 * follow the buffers created. Return NULL if unable to create more 909 * buffers. 910 * 911 * The retry flag is used to differentiate async IO (paging, swapping) 912 * which may not fail from ordinary buffer allocations. 
913 */ 914 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, 915 gfp_t gfp) 916 { 917 struct buffer_head *bh, *head; 918 long offset; 919 struct mem_cgroup *memcg, *old_memcg; 920 921 /* The folio lock pins the memcg */ 922 memcg = folio_memcg(folio); 923 old_memcg = set_active_memcg(memcg); 924 925 head = NULL; 926 offset = folio_size(folio); 927 while ((offset -= size) >= 0) { 928 bh = alloc_buffer_head(gfp); 929 if (!bh) 930 goto no_grow; 931 932 bh->b_this_page = head; 933 bh->b_blocknr = -1; 934 head = bh; 935 936 bh->b_size = size; 937 938 /* Link the buffer to its folio */ 939 folio_set_bh(bh, folio, offset); 940 } 941 out: 942 set_active_memcg(old_memcg); 943 return head; 944 /* 945 * In case anything failed, we just free everything we got. 946 */ 947 no_grow: 948 if (head) { 949 do { 950 bh = head; 951 head = head->b_this_page; 952 free_buffer_head(bh); 953 } while (head); 954 } 955 956 goto out; 957 } 958 EXPORT_SYMBOL_GPL(folio_alloc_buffers); 959 960 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size) 961 { 962 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT; 963 964 return folio_alloc_buffers(page_folio(page), size, gfp); 965 } 966 EXPORT_SYMBOL_GPL(alloc_page_buffers); 967 968 static inline void link_dev_buffers(struct folio *folio, 969 struct buffer_head *head) 970 { 971 struct buffer_head *bh, *tail; 972 973 bh = head; 974 do { 975 tail = bh; 976 bh = bh->b_this_page; 977 } while (bh); 978 tail->b_this_page = head; 979 folio_attach_private(folio, head); 980 } 981 982 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) 983 { 984 sector_t retval = ~((sector_t)0); 985 loff_t sz = bdev_nr_bytes(bdev); 986 987 if (sz) { 988 unsigned int sizebits = blksize_bits(size); 989 retval = (sz >> sizebits); 990 } 991 return retval; 992 } 993 994 /* 995 * Initialise the state of a blockdev folio's buffers. 996 */ 997 static sector_t folio_init_buffers(struct folio *folio, 998 struct block_device *bdev, unsigned size) 999 { 1000 struct buffer_head *head = folio_buffers(folio); 1001 struct buffer_head *bh = head; 1002 bool uptodate = folio_test_uptodate(folio); 1003 sector_t block = div_u64(folio_pos(folio), size); 1004 sector_t end_block = blkdev_max_block(bdev, size); 1005 1006 do { 1007 if (!buffer_mapped(bh)) { 1008 bh->b_end_io = NULL; 1009 bh->b_private = NULL; 1010 bh->b_bdev = bdev; 1011 bh->b_blocknr = block; 1012 if (uptodate) 1013 set_buffer_uptodate(bh); 1014 if (block < end_block) 1015 set_buffer_mapped(bh); 1016 } 1017 block++; 1018 bh = bh->b_this_page; 1019 } while (bh != head); 1020 1021 /* 1022 * Caller needs to validate requested block against end of device. 1023 */ 1024 return end_block; 1025 } 1026 1027 /* 1028 * Create the page-cache folio that contains the requested block. 1029 * 1030 * This is used purely for blockdev mappings. 1031 * 1032 * Returns false if we have a failure which cannot be cured by retrying 1033 * without sleeping. Returns true if we succeeded, or the caller should retry. 
1034 */ 1035 static bool grow_dev_folio(struct block_device *bdev, sector_t block, 1036 pgoff_t index, unsigned size, gfp_t gfp) 1037 { 1038 struct address_space *mapping = bdev->bd_mapping; 1039 struct folio *folio; 1040 struct buffer_head *bh; 1041 sector_t end_block = 0; 1042 1043 folio = __filemap_get_folio(mapping, index, 1044 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1045 if (IS_ERR(folio)) 1046 return false; 1047 1048 bh = folio_buffers(folio); 1049 if (bh) { 1050 if (bh->b_size == size) { 1051 end_block = folio_init_buffers(folio, bdev, size); 1052 goto unlock; 1053 } 1054 1055 /* 1056 * Retrying may succeed; for example the folio may finish 1057 * writeback, or buffers may be cleaned. This should not 1058 * happen very often; maybe we have old buffers attached to 1059 * this blockdev's page cache and we're trying to change 1060 * the block size? 1061 */ 1062 if (!try_to_free_buffers(folio)) { 1063 end_block = ~0ULL; 1064 goto unlock; 1065 } 1066 } 1067 1068 bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT); 1069 if (!bh) 1070 goto unlock; 1071 1072 /* 1073 * Link the folio to the buffers and initialise them. Take the 1074 * lock to be atomic wrt __find_get_block(), which does not 1075 * run under the folio lock. 1076 */ 1077 spin_lock(&mapping->i_private_lock); 1078 link_dev_buffers(folio, bh); 1079 end_block = folio_init_buffers(folio, bdev, size); 1080 spin_unlock(&mapping->i_private_lock); 1081 unlock: 1082 folio_unlock(folio); 1083 folio_put(folio); 1084 return block < end_block; 1085 } 1086 1087 /* 1088 * Create buffers for the specified block device block's folio. If 1089 * that folio was dirty, the buffers are set dirty also. Returns false 1090 * if we've hit a permanent error. 1091 */ 1092 static bool grow_buffers(struct block_device *bdev, sector_t block, 1093 unsigned size, gfp_t gfp) 1094 { 1095 loff_t pos; 1096 1097 /* 1098 * Check for a block which lies outside our maximum possible 1099 * pagecache index. 1100 */ 1101 if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) { 1102 printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n", 1103 __func__, (unsigned long long)block, 1104 bdev); 1105 return false; 1106 } 1107 1108 /* Create a folio with the proper size buffers */ 1109 return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp); 1110 } 1111 1112 static struct buffer_head * 1113 __getblk_slow(struct block_device *bdev, sector_t block, 1114 unsigned size, gfp_t gfp) 1115 { 1116 /* Size must be multiple of hard sectorsize */ 1117 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1118 (size < 512 || size > PAGE_SIZE))) { 1119 printk(KERN_ERR "getblk(): invalid block size %d requested\n", 1120 size); 1121 printk(KERN_ERR "logical block size: %d\n", 1122 bdev_logical_block_size(bdev)); 1123 1124 dump_stack(); 1125 return NULL; 1126 } 1127 1128 for (;;) { 1129 struct buffer_head *bh; 1130 1131 bh = __find_get_block(bdev, block, size); 1132 if (bh) 1133 return bh; 1134 1135 if (!grow_buffers(bdev, block, size, gfp)) 1136 return NULL; 1137 } 1138 } 1139 1140 /* 1141 * The relationship between dirty buffers and dirty pages: 1142 * 1143 * Whenever a page has any dirty buffers, the page's dirty bit is set, and 1144 * the page is tagged dirty in the page cache. 1145 * 1146 * At all times, the dirtiness of the buffers represents the dirtiness of 1147 * subsections of the page. If the page has buffers, the page dirty bit is 1148 * merely a hint about the true dirty state. 
1149 * 1150 * When a page is set dirty in its entirety, all its buffers are marked dirty 1151 * (if the page has buffers). 1152 * 1153 * When a buffer is marked dirty, its page is dirtied, but the page's other 1154 * buffers are not. 1155 * 1156 * Also. When blockdev buffers are explicitly read with bread(), they 1157 * individually become uptodate. But their backing page remains not 1158 * uptodate - even if all of its buffers are uptodate. A subsequent 1159 * block_read_full_folio() against that folio will discover all the uptodate 1160 * buffers, will set the folio uptodate and will perform no I/O. 1161 */ 1162 1163 /** 1164 * mark_buffer_dirty - mark a buffer_head as needing writeout 1165 * @bh: the buffer_head to mark dirty 1166 * 1167 * mark_buffer_dirty() will set the dirty bit against the buffer, then set 1168 * its backing page dirty, then tag the page as dirty in the page cache 1169 * and then attach the address_space's inode to its superblock's dirty 1170 * inode list. 1171 * 1172 * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock, 1173 * i_pages lock and mapping->host->i_lock. 1174 */ 1175 void mark_buffer_dirty(struct buffer_head *bh) 1176 { 1177 WARN_ON_ONCE(!buffer_uptodate(bh)); 1178 1179 trace_block_dirty_buffer(bh); 1180 1181 /* 1182 * Very *carefully* optimize the it-is-already-dirty case. 1183 * 1184 * Don't let the final "is it dirty" escape to before we 1185 * perhaps modified the buffer. 1186 */ 1187 if (buffer_dirty(bh)) { 1188 smp_mb(); 1189 if (buffer_dirty(bh)) 1190 return; 1191 } 1192 1193 if (!test_set_buffer_dirty(bh)) { 1194 struct folio *folio = bh->b_folio; 1195 struct address_space *mapping = NULL; 1196 1197 if (!folio_test_set_dirty(folio)) { 1198 mapping = folio->mapping; 1199 if (mapping) 1200 __folio_mark_dirty(folio, mapping, 0); 1201 } 1202 if (mapping) 1203 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1204 } 1205 } 1206 EXPORT_SYMBOL(mark_buffer_dirty); 1207 1208 void mark_buffer_write_io_error(struct buffer_head *bh) 1209 { 1210 set_buffer_write_io_error(bh); 1211 /* FIXME: do we need to set this in both places? */ 1212 if (bh->b_folio && bh->b_folio->mapping) 1213 mapping_set_error(bh->b_folio->mapping, -EIO); 1214 if (bh->b_assoc_map) { 1215 mapping_set_error(bh->b_assoc_map, -EIO); 1216 errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO); 1217 } 1218 } 1219 EXPORT_SYMBOL(mark_buffer_write_io_error); 1220 1221 /** 1222 * __brelse - Release a buffer. 1223 * @bh: The buffer to release. 1224 * 1225 * This variant of brelse() can be called if @bh is guaranteed to not be NULL. 1226 */ 1227 void __brelse(struct buffer_head *bh) 1228 { 1229 if (atomic_read(&bh->b_count)) { 1230 put_bh(bh); 1231 return; 1232 } 1233 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 1234 } 1235 EXPORT_SYMBOL(__brelse); 1236 1237 /** 1238 * __bforget - Discard any dirty data in a buffer. 1239 * @bh: The buffer to forget. 1240 * 1241 * This variant of bforget() can be called if @bh is guaranteed to not 1242 * be NULL. 
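 *
 * Callers normally go through the bforget() wrapper, which tolerates a
 * NULL @bh.  The typical use (illustrative sketch) is a filesystem that has
 * just freed the on-disk block behind @bh and wants any cached dirty data
 * dropped rather than written back:
 *
 *	bforget(bh);
 *
 * in place of brelse(bh).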
1243 */ 1244 void __bforget(struct buffer_head *bh) 1245 { 1246 clear_buffer_dirty(bh); 1247 if (bh->b_assoc_map) { 1248 struct address_space *buffer_mapping = bh->b_folio->mapping; 1249 1250 spin_lock(&buffer_mapping->i_private_lock); 1251 list_del_init(&bh->b_assoc_buffers); 1252 bh->b_assoc_map = NULL; 1253 spin_unlock(&buffer_mapping->i_private_lock); 1254 } 1255 __brelse(bh); 1256 } 1257 EXPORT_SYMBOL(__bforget); 1258 1259 static struct buffer_head *__bread_slow(struct buffer_head *bh) 1260 { 1261 lock_buffer(bh); 1262 if (buffer_uptodate(bh)) { 1263 unlock_buffer(bh); 1264 return bh; 1265 } else { 1266 get_bh(bh); 1267 bh->b_end_io = end_buffer_read_sync; 1268 submit_bh(REQ_OP_READ, bh); 1269 wait_on_buffer(bh); 1270 if (buffer_uptodate(bh)) 1271 return bh; 1272 } 1273 brelse(bh); 1274 return NULL; 1275 } 1276 1277 /* 1278 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 1279 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 1280 * refcount elevated by one when they're in an LRU. A buffer can only appear 1281 * once in a particular CPU's LRU. A single buffer can be present in multiple 1282 * CPU's LRUs at the same time. 1283 * 1284 * This is a transparent caching front-end to sb_bread(), sb_getblk() and 1285 * sb_find_get_block(). 1286 * 1287 * The LRUs themselves only need locking against invalidate_bh_lrus. We use 1288 * a local interrupt disable for that. 1289 */ 1290 1291 #define BH_LRU_SIZE 16 1292 1293 struct bh_lru { 1294 struct buffer_head *bhs[BH_LRU_SIZE]; 1295 }; 1296 1297 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 1298 1299 #ifdef CONFIG_SMP 1300 #define bh_lru_lock() local_irq_disable() 1301 #define bh_lru_unlock() local_irq_enable() 1302 #else 1303 #define bh_lru_lock() preempt_disable() 1304 #define bh_lru_unlock() preempt_enable() 1305 #endif 1306 1307 static inline void check_irqs_on(void) 1308 { 1309 #ifdef irqs_disabled 1310 BUG_ON(irqs_disabled()); 1311 #endif 1312 } 1313 1314 /* 1315 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is 1316 * inserted at the front, and the buffer_head at the back if any is evicted. 1317 * Or, if already in the LRU it is moved to the front. 1318 */ 1319 static void bh_lru_install(struct buffer_head *bh) 1320 { 1321 struct buffer_head *evictee = bh; 1322 struct bh_lru *b; 1323 int i; 1324 1325 check_irqs_on(); 1326 bh_lru_lock(); 1327 1328 /* 1329 * the refcount of buffer_head in bh_lru prevents dropping the 1330 * attached page(i.e., try_to_free_buffers) so it could cause 1331 * failing page migration. 1332 * Skip putting upcoming bh into bh_lru until migration is done. 1333 */ 1334 if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) { 1335 bh_lru_unlock(); 1336 return; 1337 } 1338 1339 b = this_cpu_ptr(&bh_lrus); 1340 for (i = 0; i < BH_LRU_SIZE; i++) { 1341 swap(evictee, b->bhs[i]); 1342 if (evictee == bh) { 1343 bh_lru_unlock(); 1344 return; 1345 } 1346 } 1347 1348 get_bh(bh); 1349 bh_lru_unlock(); 1350 brelse(evictee); 1351 } 1352 1353 /* 1354 * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
1355 */ 1356 static struct buffer_head * 1357 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 1358 { 1359 struct buffer_head *ret = NULL; 1360 unsigned int i; 1361 1362 check_irqs_on(); 1363 bh_lru_lock(); 1364 if (cpu_is_isolated(smp_processor_id())) { 1365 bh_lru_unlock(); 1366 return NULL; 1367 } 1368 for (i = 0; i < BH_LRU_SIZE; i++) { 1369 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); 1370 1371 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && 1372 bh->b_size == size) { 1373 if (i) { 1374 while (i) { 1375 __this_cpu_write(bh_lrus.bhs[i], 1376 __this_cpu_read(bh_lrus.bhs[i - 1])); 1377 i--; 1378 } 1379 __this_cpu_write(bh_lrus.bhs[0], bh); 1380 } 1381 get_bh(bh); 1382 ret = bh; 1383 break; 1384 } 1385 } 1386 bh_lru_unlock(); 1387 return ret; 1388 } 1389 1390 /* 1391 * Perform a pagecache lookup for the matching buffer. If it's there, refresh 1392 * it in the LRU and mark it as accessed. If it is not present then return 1393 * NULL 1394 */ 1395 static struct buffer_head * 1396 find_get_block_common(struct block_device *bdev, sector_t block, 1397 unsigned size, bool atomic) 1398 { 1399 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 1400 1401 if (bh == NULL) { 1402 /* __find_get_block_slow will mark the page accessed */ 1403 bh = __find_get_block_slow(bdev, block, atomic); 1404 if (bh) 1405 bh_lru_install(bh); 1406 } else 1407 touch_buffer(bh); 1408 1409 return bh; 1410 } 1411 1412 struct buffer_head * 1413 __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 1414 { 1415 return find_get_block_common(bdev, block, size, true); 1416 } 1417 EXPORT_SYMBOL(__find_get_block); 1418 1419 /* same as __find_get_block() but allows sleeping contexts */ 1420 struct buffer_head * 1421 __find_get_block_nonatomic(struct block_device *bdev, sector_t block, 1422 unsigned size) 1423 { 1424 return find_get_block_common(bdev, block, size, false); 1425 } 1426 EXPORT_SYMBOL(__find_get_block_nonatomic); 1427 1428 /** 1429 * bdev_getblk - Get a buffer_head in a block device's buffer cache. 1430 * @bdev: The block device. 1431 * @block: The block number. 1432 * @size: The size of buffer_heads for this @bdev. 1433 * @gfp: The memory allocation flags to use. 1434 * 1435 * The returned buffer head has its reference count incremented, but is 1436 * not locked. The caller should call brelse() when it has finished 1437 * with the buffer. The buffer may not be uptodate. If needed, the 1438 * caller can bring it uptodate either by reading it or overwriting it. 1439 * 1440 * Return: The buffer head, or NULL if memory could not be allocated. 1441 */ 1442 struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, 1443 unsigned size, gfp_t gfp) 1444 { 1445 struct buffer_head *bh; 1446 1447 if (gfpflags_allow_blocking(gfp)) 1448 bh = __find_get_block_nonatomic(bdev, block, size); 1449 else 1450 bh = __find_get_block(bdev, block, size); 1451 1452 might_alloc(gfp); 1453 if (bh) 1454 return bh; 1455 1456 return __getblk_slow(bdev, block, size, gfp); 1457 } 1458 EXPORT_SYMBOL(bdev_getblk); 1459 1460 /* 1461 * Do async read-ahead on a buffer.. 1462 */ 1463 void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 1464 { 1465 struct buffer_head *bh = bdev_getblk(bdev, block, size, 1466 GFP_NOWAIT | __GFP_MOVABLE); 1467 1468 if (likely(bh)) { 1469 bh_readahead(bh, REQ_RAHEAD); 1470 brelse(bh); 1471 } 1472 } 1473 EXPORT_SYMBOL(__breadahead); 1474 1475 /** 1476 * __bread_gfp() - Read a block. 
1477 * @bdev: The block device to read from. 1478 * @block: Block number in units of block size. 1479 * @size: The block size of this device in bytes. 1480 * @gfp: Not page allocation flags; see below. 1481 * 1482 * You are not expected to call this function. You should use one of 1483 * sb_bread(), sb_bread_unmovable() or __bread(). 1484 * 1485 * Read a specified block, and return the buffer head that refers to it. 1486 * If @gfp is 0, the memory will be allocated using the block device's 1487 * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be 1488 * allocated from a movable area. Do not pass in a complete set of 1489 * GFP flags. 1490 * 1491 * The returned buffer head has its refcount increased. The caller should 1492 * call brelse() when it has finished with the buffer. 1493 * 1494 * Context: May sleep waiting for I/O. 1495 * Return: NULL if the block was unreadable. 1496 */ 1497 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block, 1498 unsigned size, gfp_t gfp) 1499 { 1500 struct buffer_head *bh; 1501 1502 gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); 1503 1504 /* 1505 * Prefer looping in the allocator rather than here, at least that 1506 * code knows what it's doing. 1507 */ 1508 gfp |= __GFP_NOFAIL; 1509 1510 bh = bdev_getblk(bdev, block, size, gfp); 1511 1512 if (likely(bh) && !buffer_uptodate(bh)) 1513 bh = __bread_slow(bh); 1514 return bh; 1515 } 1516 EXPORT_SYMBOL(__bread_gfp); 1517 1518 static void __invalidate_bh_lrus(struct bh_lru *b) 1519 { 1520 int i; 1521 1522 for (i = 0; i < BH_LRU_SIZE; i++) { 1523 brelse(b->bhs[i]); 1524 b->bhs[i] = NULL; 1525 } 1526 } 1527 /* 1528 * invalidate_bh_lrus() is called rarely - but not only at unmount. 1529 * This doesn't race because it runs in each cpu either in irq 1530 * or with preempt disabled. 1531 */ 1532 static void invalidate_bh_lru(void *arg) 1533 { 1534 struct bh_lru *b = &get_cpu_var(bh_lrus); 1535 1536 __invalidate_bh_lrus(b); 1537 put_cpu_var(bh_lrus); 1538 } 1539 1540 bool has_bh_in_lru(int cpu, void *dummy) 1541 { 1542 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 1543 int i; 1544 1545 for (i = 0; i < BH_LRU_SIZE; i++) { 1546 if (b->bhs[i]) 1547 return true; 1548 } 1549 1550 return false; 1551 } 1552 1553 void invalidate_bh_lrus(void) 1554 { 1555 on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1); 1556 } 1557 EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 1558 1559 /* 1560 * It's called from workqueue context so we need a bh_lru_lock to close 1561 * the race with preemption/irq. 1562 */ 1563 void invalidate_bh_lrus_cpu(void) 1564 { 1565 struct bh_lru *b; 1566 1567 bh_lru_lock(); 1568 b = this_cpu_ptr(&bh_lrus); 1569 __invalidate_bh_lrus(b); 1570 bh_lru_unlock(); 1571 } 1572 1573 void folio_set_bh(struct buffer_head *bh, struct folio *folio, 1574 unsigned long offset) 1575 { 1576 bh->b_folio = folio; 1577 BUG_ON(offset >= folio_size(folio)); 1578 if (folio_test_highmem(folio)) 1579 /* 1580 * This catches illegal uses and preserves the offset: 1581 */ 1582 bh->b_data = (char *)(0 + offset); 1583 else 1584 bh->b_data = folio_address(folio) + offset; 1585 } 1586 EXPORT_SYMBOL(folio_set_bh); 1587 1588 /* 1589 * Called when truncating a buffer on a page completely. 
1590 */ 1591 1592 /* Bits that are cleared during an invalidate */ 1593 #define BUFFER_FLAGS_DISCARD \ 1594 (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ 1595 1 << BH_Delay | 1 << BH_Unwritten) 1596 1597 static void discard_buffer(struct buffer_head * bh) 1598 { 1599 unsigned long b_state; 1600 1601 lock_buffer(bh); 1602 clear_buffer_dirty(bh); 1603 bh->b_bdev = NULL; 1604 b_state = READ_ONCE(bh->b_state); 1605 do { 1606 } while (!try_cmpxchg(&bh->b_state, &b_state, 1607 b_state & ~BUFFER_FLAGS_DISCARD)); 1608 unlock_buffer(bh); 1609 } 1610 1611 /** 1612 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. 1613 * @folio: The folio which is affected. 1614 * @offset: start of the range to invalidate 1615 * @length: length of the range to invalidate 1616 * 1617 * block_invalidate_folio() is called when all or part of the folio has been 1618 * invalidated by a truncate operation. 1619 * 1620 * block_invalidate_folio() does not have to release all buffers, but it must 1621 * ensure that no dirty buffer is left outside @offset and that no I/O 1622 * is underway against any of the blocks which are outside the truncation 1623 * point. Because the caller is about to free (and possibly reuse) those 1624 * blocks on-disk. 1625 */ 1626 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) 1627 { 1628 struct buffer_head *head, *bh, *next; 1629 size_t curr_off = 0; 1630 size_t stop = length + offset; 1631 1632 BUG_ON(!folio_test_locked(folio)); 1633 1634 /* 1635 * Check for overflow 1636 */ 1637 BUG_ON(stop > folio_size(folio) || stop < length); 1638 1639 head = folio_buffers(folio); 1640 if (!head) 1641 return; 1642 1643 bh = head; 1644 do { 1645 size_t next_off = curr_off + bh->b_size; 1646 next = bh->b_this_page; 1647 1648 /* 1649 * Are we still fully in range ? 1650 */ 1651 if (next_off > stop) 1652 goto out; 1653 1654 /* 1655 * is this block fully invalidated? 1656 */ 1657 if (offset <= curr_off) 1658 discard_buffer(bh); 1659 curr_off = next_off; 1660 bh = next; 1661 } while (bh != head); 1662 1663 /* 1664 * We release buffers only if the entire folio is being invalidated. 1665 * The get_block cached value has been unconditionally invalidated, 1666 * so real IO is not possible anymore. 1667 */ 1668 if (length == folio_size(folio)) 1669 filemap_release_folio(folio, 0); 1670 out: 1671 folio_clear_mappedtodisk(folio); 1672 return; 1673 } 1674 EXPORT_SYMBOL(block_invalidate_folio); 1675 1676 /* 1677 * We attach and possibly dirty the buffers atomically wrt 1678 * block_dirty_folio() via i_private_lock. try_to_free_buffers 1679 * is already excluded via the folio lock. 
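 *
 * A typical caller (illustrative sketch, with the block size taken from
 * the owning inode) attaches buffers to a locked folio like this:
 *
 *	head = create_empty_buffers(folio, 1 << inode->i_blkbits, 0);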
1680 */ 1681 struct buffer_head *create_empty_buffers(struct folio *folio, 1682 unsigned long blocksize, unsigned long b_state) 1683 { 1684 struct buffer_head *bh, *head, *tail; 1685 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL; 1686 1687 head = folio_alloc_buffers(folio, blocksize, gfp); 1688 bh = head; 1689 do { 1690 bh->b_state |= b_state; 1691 tail = bh; 1692 bh = bh->b_this_page; 1693 } while (bh); 1694 tail->b_this_page = head; 1695 1696 spin_lock(&folio->mapping->i_private_lock); 1697 if (folio_test_uptodate(folio) || folio_test_dirty(folio)) { 1698 bh = head; 1699 do { 1700 if (folio_test_dirty(folio)) 1701 set_buffer_dirty(bh); 1702 if (folio_test_uptodate(folio)) 1703 set_buffer_uptodate(bh); 1704 bh = bh->b_this_page; 1705 } while (bh != head); 1706 } 1707 folio_attach_private(folio, head); 1708 spin_unlock(&folio->mapping->i_private_lock); 1709 1710 return head; 1711 } 1712 EXPORT_SYMBOL(create_empty_buffers); 1713 1714 /** 1715 * clean_bdev_aliases: clean a range of buffers in block device 1716 * @bdev: Block device to clean buffers in 1717 * @block: Start of a range of blocks to clean 1718 * @len: Number of blocks to clean 1719 * 1720 * We are taking a range of blocks for data and we don't want writeback of any 1721 * buffer-cache aliases starting from return from this function and until the 1722 * moment when something will explicitly mark the buffer dirty (hopefully that 1723 * will not happen until we will free that block ;-) We don't even need to mark 1724 * it not-uptodate - nobody can expect anything from a newly allocated buffer 1725 * anyway. We used to use unmap_buffer() for such invalidation, but that was 1726 * wrong. We definitely don't want to mark the alias unmapped, for example - it 1727 * would confuse anyone who might pick it with bread() afterwards... 1728 * 1729 * Also.. Note that bforget() doesn't lock the buffer. So there can be 1730 * writeout I/O going on against recently-freed buffers. We don't wait on that 1731 * I/O in bforget() - it's more efficient to wait on the I/O only if we really 1732 * need to. That happens here. 1733 */ 1734 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 1735 { 1736 struct address_space *bd_mapping = bdev->bd_mapping; 1737 const int blkbits = bd_mapping->host->i_blkbits; 1738 struct folio_batch fbatch; 1739 pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE; 1740 pgoff_t end; 1741 int i, count; 1742 struct buffer_head *bh; 1743 struct buffer_head *head; 1744 1745 end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE; 1746 folio_batch_init(&fbatch); 1747 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { 1748 count = folio_batch_count(&fbatch); 1749 for (i = 0; i < count; i++) { 1750 struct folio *folio = fbatch.folios[i]; 1751 1752 if (!folio_buffers(folio)) 1753 continue; 1754 /* 1755 * We use folio lock instead of bd_mapping->i_private_lock 1756 * to pin buffers here since we can afford to sleep and 1757 * it scales better than a global spinlock lock. 
1758 */ 1759 folio_lock(folio); 1760 /* Recheck when the folio is locked which pins bhs */ 1761 head = folio_buffers(folio); 1762 if (!head) 1763 goto unlock_page; 1764 bh = head; 1765 do { 1766 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 1767 goto next; 1768 if (bh->b_blocknr >= block + len) 1769 break; 1770 clear_buffer_dirty(bh); 1771 wait_on_buffer(bh); 1772 clear_buffer_req(bh); 1773 next: 1774 bh = bh->b_this_page; 1775 } while (bh != head); 1776 unlock_page: 1777 folio_unlock(folio); 1778 } 1779 folio_batch_release(&fbatch); 1780 cond_resched(); 1781 /* End of range already reached? */ 1782 if (index > end || !index) 1783 break; 1784 } 1785 } 1786 EXPORT_SYMBOL(clean_bdev_aliases); 1787 1788 static struct buffer_head *folio_create_buffers(struct folio *folio, 1789 struct inode *inode, 1790 unsigned int b_state) 1791 { 1792 struct buffer_head *bh; 1793 1794 BUG_ON(!folio_test_locked(folio)); 1795 1796 bh = folio_buffers(folio); 1797 if (!bh) 1798 bh = create_empty_buffers(folio, 1799 1 << READ_ONCE(inode->i_blkbits), b_state); 1800 return bh; 1801 } 1802 1803 /* 1804 * NOTE! All mapped/uptodate combinations are valid: 1805 * 1806 * Mapped Uptodate Meaning 1807 * 1808 * No No "unknown" - must do get_block() 1809 * No Yes "hole" - zero-filled 1810 * Yes No "allocated" - allocated on disk, not read in 1811 * Yes Yes "valid" - allocated and up-to-date in memory. 1812 * 1813 * "Dirty" is valid only with the last case (mapped+uptodate). 1814 */ 1815 1816 /* 1817 * While block_write_full_folio is writing back the dirty buffers under 1818 * the page lock, whoever dirtied the buffers may decide to clean them 1819 * again at any time. We handle that by only looking at the buffer 1820 * state inside lock_buffer(). 1821 * 1822 * If block_write_full_folio() is called for regular writeback 1823 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 1824 * locked buffer. This only can happen if someone has written the buffer 1825 * directly, with submit_bh(). At the address_space level PageWriteback 1826 * prevents this contention from occurring. 1827 * 1828 * If block_write_full_folio() is called with wbc->sync_mode == 1829 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1830 * causes the writes to be flagged as synchronous writes. 1831 */ 1832 int __block_write_full_folio(struct inode *inode, struct folio *folio, 1833 get_block_t *get_block, struct writeback_control *wbc) 1834 { 1835 int err; 1836 sector_t block; 1837 sector_t last_block; 1838 struct buffer_head *bh, *head; 1839 size_t blocksize; 1840 int nr_underway = 0; 1841 blk_opf_t write_flags = wbc_to_write_flags(wbc); 1842 1843 head = folio_create_buffers(folio, inode, 1844 (1 << BH_Dirty) | (1 << BH_Uptodate)); 1845 1846 /* 1847 * Be very careful. We have no exclusion from block_dirty_folio 1848 * here, and the (potentially unmapped) buffers may become dirty at 1849 * any time. If a buffer becomes dirty here after we've inspected it 1850 * then we just miss that fact, and the folio stays dirty. 1851 * 1852 * Buffers outside i_size may be dirtied by block_dirty_folio; 1853 * handle that here by just cleaning them. 1854 */ 1855 1856 bh = head; 1857 blocksize = bh->b_size; 1858 1859 block = div_u64(folio_pos(folio), blocksize); 1860 last_block = div_u64(i_size_read(inode) - 1, blocksize); 1861 1862 /* 1863 * Get all the dirty buffers mapped to disk addresses and 1864 * handle any aliases from the underlying blockdev's mapping. 
1865 */ 1866 do { 1867 if (block > last_block) { 1868 /* 1869 * mapped buffers outside i_size will occur, because 1870 * this folio can be outside i_size when there is a 1871 * truncate in progress. 1872 */ 1873 /* 1874 * The buffer was zeroed by block_write_full_folio() 1875 */ 1876 clear_buffer_dirty(bh); 1877 set_buffer_uptodate(bh); 1878 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 1879 buffer_dirty(bh)) { 1880 WARN_ON(bh->b_size != blocksize); 1881 err = get_block(inode, block, bh, 1); 1882 if (err) 1883 goto recover; 1884 clear_buffer_delay(bh); 1885 if (buffer_new(bh)) { 1886 /* blockdev mappings never come here */ 1887 clear_buffer_new(bh); 1888 clean_bdev_bh_alias(bh); 1889 } 1890 } 1891 bh = bh->b_this_page; 1892 block++; 1893 } while (bh != head); 1894 1895 do { 1896 if (!buffer_mapped(bh)) 1897 continue; 1898 /* 1899 * If it's a fully non-blocking write attempt and we cannot 1900 * lock the buffer then redirty the folio. Note that this can 1901 * potentially cause a busy-wait loop from writeback threads 1902 * and kswapd activity, but those code paths have their own 1903 * higher-level throttling. 1904 */ 1905 if (wbc->sync_mode != WB_SYNC_NONE) { 1906 lock_buffer(bh); 1907 } else if (!trylock_buffer(bh)) { 1908 folio_redirty_for_writepage(wbc, folio); 1909 continue; 1910 } 1911 if (test_clear_buffer_dirty(bh)) { 1912 mark_buffer_async_write_endio(bh, 1913 end_buffer_async_write); 1914 } else { 1915 unlock_buffer(bh); 1916 } 1917 } while ((bh = bh->b_this_page) != head); 1918 1919 /* 1920 * The folio and its buffers are protected by the writeback flag, 1921 * so we can drop the bh refcounts early. 1922 */ 1923 BUG_ON(folio_test_writeback(folio)); 1924 folio_start_writeback(folio); 1925 1926 do { 1927 struct buffer_head *next = bh->b_this_page; 1928 if (buffer_async_write(bh)) { 1929 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 1930 inode->i_write_hint, wbc); 1931 nr_underway++; 1932 } 1933 bh = next; 1934 } while (bh != head); 1935 folio_unlock(folio); 1936 1937 err = 0; 1938 done: 1939 if (nr_underway == 0) { 1940 /* 1941 * The folio was marked dirty, but the buffers were 1942 * clean. Someone wrote them back by hand with 1943 * write_dirty_buffer/submit_bh. A rare case. 1944 */ 1945 folio_end_writeback(folio); 1946 1947 /* 1948 * The folio and buffer_heads can be released at any time from 1949 * here on. 1950 */ 1951 } 1952 return err; 1953 1954 recover: 1955 /* 1956 * ENOSPC, or some other error. We may already have added some 1957 * blocks to the file, so we need to write these out to avoid 1958 * exposing stale data. 1959 * The folio is currently locked and not marked for writeback 1960 */ 1961 bh = head; 1962 /* Recovery: lock and submit the mapped buffers */ 1963 do { 1964 if (buffer_mapped(bh) && buffer_dirty(bh) && 1965 !buffer_delay(bh)) { 1966 lock_buffer(bh); 1967 mark_buffer_async_write_endio(bh, 1968 end_buffer_async_write); 1969 } else { 1970 /* 1971 * The buffer may have been set dirty during 1972 * attachment to a dirty folio. 
1973 */ 1974 clear_buffer_dirty(bh); 1975 } 1976 } while ((bh = bh->b_this_page) != head); 1977 BUG_ON(folio_test_writeback(folio)); 1978 mapping_set_error(folio->mapping, err); 1979 folio_start_writeback(folio); 1980 do { 1981 struct buffer_head *next = bh->b_this_page; 1982 if (buffer_async_write(bh)) { 1983 clear_buffer_dirty(bh); 1984 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 1985 inode->i_write_hint, wbc); 1986 nr_underway++; 1987 } 1988 bh = next; 1989 } while (bh != head); 1990 folio_unlock(folio); 1991 goto done; 1992 } 1993 EXPORT_SYMBOL(__block_write_full_folio); 1994 1995 /* 1996 * If a folio has any new buffers, zero them out here, and mark them uptodate 1997 * and dirty so they'll be written out (in order to prevent uninitialised 1998 * block data from leaking). And clear the new bit. 1999 */ 2000 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) 2001 { 2002 size_t block_start, block_end; 2003 struct buffer_head *head, *bh; 2004 2005 BUG_ON(!folio_test_locked(folio)); 2006 head = folio_buffers(folio); 2007 if (!head) 2008 return; 2009 2010 bh = head; 2011 block_start = 0; 2012 do { 2013 block_end = block_start + bh->b_size; 2014 2015 if (buffer_new(bh)) { 2016 if (block_end > from && block_start < to) { 2017 if (!folio_test_uptodate(folio)) { 2018 size_t start, xend; 2019 2020 start = max(from, block_start); 2021 xend = min(to, block_end); 2022 2023 folio_zero_segment(folio, start, xend); 2024 set_buffer_uptodate(bh); 2025 } 2026 2027 clear_buffer_new(bh); 2028 mark_buffer_dirty(bh); 2029 } 2030 } 2031 2032 block_start = block_end; 2033 bh = bh->b_this_page; 2034 } while (bh != head); 2035 } 2036 EXPORT_SYMBOL(folio_zero_new_buffers); 2037 2038 static int 2039 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 2040 const struct iomap *iomap) 2041 { 2042 loff_t offset = (loff_t)block << inode->i_blkbits; 2043 2044 bh->b_bdev = iomap->bdev; 2045 2046 /* 2047 * Block points to offset in file we need to map, iomap contains 2048 * the offset at which the map starts. If the map ends before the 2049 * current block, then do not map the buffer and let the caller 2050 * handle it. 2051 */ 2052 if (offset >= iomap->offset + iomap->length) 2053 return -EIO; 2054 2055 switch (iomap->type) { 2056 case IOMAP_HOLE: 2057 /* 2058 * If the buffer is not up to date or beyond the current EOF, 2059 * we need to mark it as new to ensure sub-block zeroing is 2060 * executed if necessary. 2061 */ 2062 if (!buffer_uptodate(bh) || 2063 (offset >= i_size_read(inode))) 2064 set_buffer_new(bh); 2065 return 0; 2066 case IOMAP_DELALLOC: 2067 if (!buffer_uptodate(bh) || 2068 (offset >= i_size_read(inode))) 2069 set_buffer_new(bh); 2070 set_buffer_uptodate(bh); 2071 set_buffer_mapped(bh); 2072 set_buffer_delay(bh); 2073 return 0; 2074 case IOMAP_UNWRITTEN: 2075 /* 2076 * For unwritten regions, we always need to ensure that regions 2077 * in the block we are not writing to are zeroed. Mark the 2078 * buffer as new to ensure this. 2079 */ 2080 set_buffer_new(bh); 2081 set_buffer_unwritten(bh); 2082 fallthrough; 2083 case IOMAP_MAPPED: 2084 if ((iomap->flags & IOMAP_F_NEW) || 2085 offset >= i_size_read(inode)) { 2086 /* 2087 * This can happen if truncating the block device races 2088 * with the check in the caller as i_size updates on 2089 * block devices aren't synchronized by i_rwsem for 2090 * block devices. 
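 *
 * (Worked example of the b_blocknr computation below, with made-up
 * numbers: with 4K blocks (i_blkbits == 12), iomap->offset == 1 MiB,
 * iomap->addr == 8 MiB and a buffer at file offset 1 MiB + 16 KiB,
 * b_blocknr becomes (8 MiB + 16 KiB) >> 12 == 2052.)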
2091 */ 2092 if (S_ISBLK(inode->i_mode)) 2093 return -EIO; 2094 set_buffer_new(bh); 2095 } 2096 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 2097 inode->i_blkbits; 2098 set_buffer_mapped(bh); 2099 return 0; 2100 default: 2101 WARN_ON_ONCE(1); 2102 return -EIO; 2103 } 2104 } 2105 2106 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 2107 get_block_t *get_block, const struct iomap *iomap) 2108 { 2109 size_t from = offset_in_folio(folio, pos); 2110 size_t to = from + len; 2111 struct inode *inode = folio->mapping->host; 2112 size_t block_start, block_end; 2113 sector_t block; 2114 int err = 0; 2115 size_t blocksize; 2116 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 2117 2118 BUG_ON(!folio_test_locked(folio)); 2119 BUG_ON(to > folio_size(folio)); 2120 BUG_ON(from > to); 2121 2122 head = folio_create_buffers(folio, inode, 0); 2123 blocksize = head->b_size; 2124 block = div_u64(folio_pos(folio), blocksize); 2125 2126 for (bh = head, block_start = 0; bh != head || !block_start; 2127 block++, block_start=block_end, bh = bh->b_this_page) { 2128 block_end = block_start + blocksize; 2129 if (block_end <= from || block_start >= to) { 2130 if (folio_test_uptodate(folio)) { 2131 if (!buffer_uptodate(bh)) 2132 set_buffer_uptodate(bh); 2133 } 2134 continue; 2135 } 2136 if (buffer_new(bh)) 2137 clear_buffer_new(bh); 2138 if (!buffer_mapped(bh)) { 2139 WARN_ON(bh->b_size != blocksize); 2140 if (get_block) 2141 err = get_block(inode, block, bh, 1); 2142 else 2143 err = iomap_to_bh(inode, block, bh, iomap); 2144 if (err) 2145 break; 2146 2147 if (buffer_new(bh)) { 2148 clean_bdev_bh_alias(bh); 2149 if (folio_test_uptodate(folio)) { 2150 clear_buffer_new(bh); 2151 set_buffer_uptodate(bh); 2152 mark_buffer_dirty(bh); 2153 continue; 2154 } 2155 if (block_end > to || block_start < from) 2156 folio_zero_segments(folio, 2157 to, block_end, 2158 block_start, from); 2159 continue; 2160 } 2161 } 2162 if (folio_test_uptodate(folio)) { 2163 if (!buffer_uptodate(bh)) 2164 set_buffer_uptodate(bh); 2165 continue; 2166 } 2167 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 2168 !buffer_unwritten(bh) && 2169 (block_start < from || block_end > to)) { 2170 bh_read_nowait(bh, 0); 2171 *wait_bh++=bh; 2172 } 2173 } 2174 /* 2175 * If we issued read requests - let them complete. 
2176 */ 2177 while(wait_bh > wait) { 2178 wait_on_buffer(*--wait_bh); 2179 if (!buffer_uptodate(*wait_bh)) 2180 err = -EIO; 2181 } 2182 if (unlikely(err)) 2183 folio_zero_new_buffers(folio, from, to); 2184 return err; 2185 } 2186 2187 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, 2188 get_block_t *get_block) 2189 { 2190 return __block_write_begin_int(folio, pos, len, get_block, NULL); 2191 } 2192 EXPORT_SYMBOL(__block_write_begin); 2193 2194 void block_commit_write(struct folio *folio, size_t from, size_t to) 2195 { 2196 size_t block_start, block_end; 2197 bool partial = false; 2198 unsigned blocksize; 2199 struct buffer_head *bh, *head; 2200 2201 bh = head = folio_buffers(folio); 2202 if (!bh) 2203 return; 2204 blocksize = bh->b_size; 2205 2206 block_start = 0; 2207 do { 2208 block_end = block_start + blocksize; 2209 if (block_end <= from || block_start >= to) { 2210 if (!buffer_uptodate(bh)) 2211 partial = true; 2212 } else { 2213 set_buffer_uptodate(bh); 2214 mark_buffer_dirty(bh); 2215 } 2216 if (buffer_new(bh)) 2217 clear_buffer_new(bh); 2218 2219 block_start = block_end; 2220 bh = bh->b_this_page; 2221 } while (bh != head); 2222 2223 /* 2224 * If this is a partial write which happened to make all buffers 2225 * uptodate then we can optimize away a bogus read_folio() for 2226 * the next read(). Here we 'discover' whether the folio went 2227 * uptodate as a result of this (potentially partial) write. 2228 */ 2229 if (!partial) 2230 folio_mark_uptodate(folio); 2231 } 2232 EXPORT_SYMBOL(block_commit_write); 2233 2234 /* 2235 * block_write_begin takes care of the basic task of block allocation and 2236 * bringing partial write blocks uptodate first. 2237 * 2238 * The filesystem needs to handle block truncation upon failure. 2239 */ 2240 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2241 struct folio **foliop, get_block_t *get_block) 2242 { 2243 pgoff_t index = pos >> PAGE_SHIFT; 2244 struct folio *folio; 2245 int status; 2246 2247 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2248 mapping_gfp_mask(mapping)); 2249 if (IS_ERR(folio)) 2250 return PTR_ERR(folio); 2251 2252 status = __block_write_begin_int(folio, pos, len, get_block, NULL); 2253 if (unlikely(status)) { 2254 folio_unlock(folio); 2255 folio_put(folio); 2256 folio = NULL; 2257 } 2258 2259 *foliop = folio; 2260 return status; 2261 } 2262 EXPORT_SYMBOL(block_write_begin); 2263 2264 int block_write_end(struct file *file, struct address_space *mapping, 2265 loff_t pos, unsigned len, unsigned copied, 2266 struct folio *folio, void *fsdata) 2267 { 2268 size_t start = pos - folio_pos(folio); 2269 2270 if (unlikely(copied < len)) { 2271 /* 2272 * The buffers that were written will now be uptodate, so 2273 * we don't have to worry about a read_folio reading them 2274 * and overwriting a partial write. However if we have 2275 * encountered a short write and only partially written 2276 * into a buffer, it will not be marked uptodate, so a 2277 * read_folio might come in and destroy our partial write. 2278 * 2279 * Do the simplest thing, and just treat any short write to a 2280 * non uptodate folio as a zero-length write, and force the 2281 * caller to redo the whole thing. 
2282 */ 2283 if (!folio_test_uptodate(folio)) 2284 copied = 0; 2285 2286 folio_zero_new_buffers(folio, start+copied, start+len); 2287 } 2288 flush_dcache_folio(folio); 2289 2290 /* This could be a short (even 0-length) commit */ 2291 block_commit_write(folio, start, start + copied); 2292 2293 return copied; 2294 } 2295 EXPORT_SYMBOL(block_write_end); 2296 2297 int generic_write_end(struct file *file, struct address_space *mapping, 2298 loff_t pos, unsigned len, unsigned copied, 2299 struct folio *folio, void *fsdata) 2300 { 2301 struct inode *inode = mapping->host; 2302 loff_t old_size = inode->i_size; 2303 bool i_size_changed = false; 2304 2305 copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata); 2306 2307 /* 2308 * No need to use i_size_read() here, the i_size cannot change under us 2309 * because we hold i_rwsem. 2310 * 2311 * But it's important to update i_size while still holding folio lock: 2312 * page writeout could otherwise come in and zero beyond i_size. 2313 */ 2314 if (pos + copied > inode->i_size) { 2315 i_size_write(inode, pos + copied); 2316 i_size_changed = true; 2317 } 2318 2319 folio_unlock(folio); 2320 folio_put(folio); 2321 2322 if (old_size < pos) 2323 pagecache_isize_extended(inode, old_size, pos); 2324 /* 2325 * Don't mark the inode dirty under page lock. First, it unnecessarily 2326 * makes the holding time of page lock longer. Second, it forces lock 2327 * ordering of page lock and transaction start for journaling 2328 * filesystems. 2329 */ 2330 if (i_size_changed) 2331 mark_inode_dirty(inode); 2332 return copied; 2333 } 2334 EXPORT_SYMBOL(generic_write_end); 2335 2336 /* 2337 * block_is_partially_uptodate checks whether buffers within a folio are 2338 * uptodate or not. 2339 * 2340 * Returns true if all buffers which correspond to the specified part 2341 * of the folio are uptodate. 2342 */ 2343 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 2344 { 2345 unsigned block_start, block_end, blocksize; 2346 unsigned to; 2347 struct buffer_head *bh, *head; 2348 bool ret = true; 2349 2350 head = folio_buffers(folio); 2351 if (!head) 2352 return false; 2353 blocksize = head->b_size; 2354 to = min_t(unsigned, folio_size(folio) - from, count); 2355 to = from + to; 2356 if (from < blocksize && to > folio_size(folio) - blocksize) 2357 return false; 2358 2359 bh = head; 2360 block_start = 0; 2361 do { 2362 block_end = block_start + blocksize; 2363 if (block_end > from && block_start < to) { 2364 if (!buffer_uptodate(bh)) { 2365 ret = false; 2366 break; 2367 } 2368 if (block_end >= to) 2369 break; 2370 } 2371 block_start = block_end; 2372 bh = bh->b_this_page; 2373 } while (bh != head); 2374 2375 return ret; 2376 } 2377 EXPORT_SYMBOL(block_is_partially_uptodate); 2378 2379 /* 2380 * Generic "read_folio" function for block devices that have the normal 2381 * get_block functionality. This is most of the block device filesystems. 2382 * Reads the folio asynchronously --- the unlock_buffer() and 2383 * set/clear_buffer_uptodate() functions propagate buffer state into the 2384 * folio once IO has completed. 2385 */ 2386 int block_read_full_folio(struct folio *folio, get_block_t *get_block) 2387 { 2388 struct inode *inode = folio->mapping->host; 2389 sector_t iblock, lblock; 2390 struct buffer_head *bh, *head, *prev = NULL; 2391 size_t blocksize; 2392 int fully_mapped = 1; 2393 bool page_error = false; 2394 loff_t limit = i_size_read(inode); 2395 2396 /* This is needed for ext4. 
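 * fs-verity on ext4 stores the Merkle tree in the file past i_size, and
 * those blocks must remain readable through this path, so the readable
 * limit is raised to s_maxbytes for verity inodes.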
*/ 2397 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 2398 limit = inode->i_sb->s_maxbytes; 2399 2400 head = folio_create_buffers(folio, inode, 0); 2401 blocksize = head->b_size; 2402 2403 iblock = div_u64(folio_pos(folio), blocksize); 2404 lblock = div_u64(limit + blocksize - 1, blocksize); 2405 bh = head; 2406 2407 do { 2408 if (buffer_uptodate(bh)) 2409 continue; 2410 2411 if (!buffer_mapped(bh)) { 2412 int err = 0; 2413 2414 fully_mapped = 0; 2415 if (iblock < lblock) { 2416 WARN_ON(bh->b_size != blocksize); 2417 err = get_block(inode, iblock, bh, 0); 2418 if (err) 2419 page_error = true; 2420 } 2421 if (!buffer_mapped(bh)) { 2422 folio_zero_range(folio, bh_offset(bh), 2423 blocksize); 2424 if (!err) 2425 set_buffer_uptodate(bh); 2426 continue; 2427 } 2428 /* 2429 * get_block() might have updated the buffer 2430 * synchronously 2431 */ 2432 if (buffer_uptodate(bh)) 2433 continue; 2434 } 2435 2436 lock_buffer(bh); 2437 if (buffer_uptodate(bh)) { 2438 unlock_buffer(bh); 2439 continue; 2440 } 2441 2442 mark_buffer_async_read(bh); 2443 if (prev) 2444 submit_bh(REQ_OP_READ, prev); 2445 prev = bh; 2446 } while (iblock++, (bh = bh->b_this_page) != head); 2447 2448 if (fully_mapped) 2449 folio_set_mappedtodisk(folio); 2450 2451 /* 2452 * All buffers are uptodate or get_block() returned an error 2453 * when trying to map them - we must finish the read because 2454 * end_buffer_async_read() will never be called on any buffer 2455 * in this folio. 2456 */ 2457 if (prev) 2458 submit_bh(REQ_OP_READ, prev); 2459 else 2460 folio_end_read(folio, !page_error); 2461 2462 return 0; 2463 } 2464 EXPORT_SYMBOL(block_read_full_folio); 2465 2466 /* utility function for filesystems that need to do work on expanding 2467 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2468 * deal with the hole. 
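 *
 * Illustrative sketch (not from this file; the ->setattr() context and the
 * field names are the usual struct iattr ones, used here hypothetically):
 * a filesystem extending a file would typically call it like this before
 * completing the size change:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}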
2469 */ 2470 int generic_cont_expand_simple(struct inode *inode, loff_t size) 2471 { 2472 struct address_space *mapping = inode->i_mapping; 2473 const struct address_space_operations *aops = mapping->a_ops; 2474 struct folio *folio; 2475 void *fsdata = NULL; 2476 int err; 2477 2478 err = inode_newsize_ok(inode, size); 2479 if (err) 2480 goto out; 2481 2482 err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata); 2483 if (err) 2484 goto out; 2485 2486 err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata); 2487 BUG_ON(err > 0); 2488 2489 out: 2490 return err; 2491 } 2492 EXPORT_SYMBOL(generic_cont_expand_simple); 2493 2494 static int cont_expand_zero(struct file *file, struct address_space *mapping, 2495 loff_t pos, loff_t *bytes) 2496 { 2497 struct inode *inode = mapping->host; 2498 const struct address_space_operations *aops = mapping->a_ops; 2499 unsigned int blocksize = i_blocksize(inode); 2500 struct folio *folio; 2501 void *fsdata = NULL; 2502 pgoff_t index, curidx; 2503 loff_t curpos; 2504 unsigned zerofrom, offset, len; 2505 int err = 0; 2506 2507 index = pos >> PAGE_SHIFT; 2508 offset = pos & ~PAGE_MASK; 2509 2510 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 2511 zerofrom = curpos & ~PAGE_MASK; 2512 if (zerofrom & (blocksize-1)) { 2513 *bytes |= (blocksize-1); 2514 (*bytes)++; 2515 } 2516 len = PAGE_SIZE - zerofrom; 2517 2518 err = aops->write_begin(file, mapping, curpos, len, 2519 &folio, &fsdata); 2520 if (err) 2521 goto out; 2522 folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2523 err = aops->write_end(file, mapping, curpos, len, len, 2524 folio, fsdata); 2525 if (err < 0) 2526 goto out; 2527 BUG_ON(err != len); 2528 err = 0; 2529 2530 balance_dirty_pages_ratelimited(mapping); 2531 2532 if (fatal_signal_pending(current)) { 2533 err = -EINTR; 2534 goto out; 2535 } 2536 } 2537 2538 /* page covers the boundary, find the boundary offset */ 2539 if (index == curidx) { 2540 zerofrom = curpos & ~PAGE_MASK; 2541 /* if we will expand the thing last block will be filled */ 2542 if (offset <= zerofrom) { 2543 goto out; 2544 } 2545 if (zerofrom & (blocksize-1)) { 2546 *bytes |= (blocksize-1); 2547 (*bytes)++; 2548 } 2549 len = offset - zerofrom; 2550 2551 err = aops->write_begin(file, mapping, curpos, len, 2552 &folio, &fsdata); 2553 if (err) 2554 goto out; 2555 folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2556 err = aops->write_end(file, mapping, curpos, len, len, 2557 folio, fsdata); 2558 if (err < 0) 2559 goto out; 2560 BUG_ON(err != len); 2561 err = 0; 2562 } 2563 out: 2564 return err; 2565 } 2566 2567 /* 2568 * For moronic filesystems that do not allow holes in file. 2569 * We may have to extend the file. 
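 *
 * Illustrative sketch (not from this file; myfs_get_block(), MYFS_I() and
 * the per-inode loff_t zeroed_size field are hypothetical): such a
 * filesystem typically wires this helper into its ->write_begin()
 * address_space operation, passing a pointer to its own record of how far
 * the file contents have been instantiated:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, foliop,
 *					fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->zeroed_size);
 *	}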
2570 */ 2571 int cont_write_begin(struct file *file, struct address_space *mapping, 2572 loff_t pos, unsigned len, 2573 struct folio **foliop, void **fsdata, 2574 get_block_t *get_block, loff_t *bytes) 2575 { 2576 struct inode *inode = mapping->host; 2577 unsigned int blocksize = i_blocksize(inode); 2578 unsigned int zerofrom; 2579 int err; 2580 2581 err = cont_expand_zero(file, mapping, pos, bytes); 2582 if (err) 2583 return err; 2584 2585 zerofrom = *bytes & ~PAGE_MASK; 2586 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2587 *bytes |= (blocksize-1); 2588 (*bytes)++; 2589 } 2590 2591 return block_write_begin(mapping, pos, len, foliop, get_block); 2592 } 2593 EXPORT_SYMBOL(cont_write_begin); 2594 2595 /* 2596 * block_page_mkwrite() is not allowed to change the file size as it gets 2597 * called from a page fault handler when a page is first dirtied. Hence we must 2598 * be careful to check for EOF conditions here. We set the page up correctly 2599 * for a written page which means we get ENOSPC checking when writing into 2600 * holes and correct delalloc and unwritten extent mapping on filesystems that 2601 * support these features. 2602 * 2603 * We are not allowed to take the i_mutex here so we have to play games to 2604 * protect against truncate races as the page could now be beyond EOF. Because 2605 * truncate writes the inode size before removing pages, once we have the 2606 * page lock we can determine safely if the page is beyond EOF. If it is not 2607 * beyond EOF, then the page is guaranteed safe against truncation until we 2608 * unlock the page. 2609 * 2610 * Direct callers of this function should protect against filesystem freezing 2611 * using sb_start_pagefault() - sb_end_pagefault() functions. 2612 */ 2613 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2614 get_block_t get_block) 2615 { 2616 struct folio *folio = page_folio(vmf->page); 2617 struct inode *inode = file_inode(vma->vm_file); 2618 unsigned long end; 2619 loff_t size; 2620 int ret; 2621 2622 folio_lock(folio); 2623 size = i_size_read(inode); 2624 if ((folio->mapping != inode->i_mapping) || 2625 (folio_pos(folio) >= size)) { 2626 /* We overload EFAULT to mean page got truncated */ 2627 ret = -EFAULT; 2628 goto out_unlock; 2629 } 2630 2631 end = folio_size(folio); 2632 /* folio is wholly or partially inside EOF */ 2633 if (folio_pos(folio) + end > size) 2634 end = size - folio_pos(folio); 2635 2636 ret = __block_write_begin_int(folio, 0, end, get_block, NULL); 2637 if (unlikely(ret)) 2638 goto out_unlock; 2639 2640 block_commit_write(folio, 0, end); 2641 2642 folio_mark_dirty(folio); 2643 folio_wait_stable(folio); 2644 return 0; 2645 out_unlock: 2646 folio_unlock(folio); 2647 return ret; 2648 } 2649 EXPORT_SYMBOL(block_page_mkwrite); 2650 2651 int block_truncate_page(struct address_space *mapping, 2652 loff_t from, get_block_t *get_block) 2653 { 2654 pgoff_t index = from >> PAGE_SHIFT; 2655 unsigned blocksize; 2656 sector_t iblock; 2657 size_t offset, length, pos; 2658 struct inode *inode = mapping->host; 2659 struct folio *folio; 2660 struct buffer_head *bh; 2661 int err = 0; 2662 2663 blocksize = i_blocksize(inode); 2664 length = from & (blocksize - 1); 2665 2666 /* Block boundary? 
Nothing to do */ 2667 if (!length) 2668 return 0; 2669 2670 length = blocksize - length; 2671 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits; 2672 2673 folio = filemap_grab_folio(mapping, index); 2674 if (IS_ERR(folio)) 2675 return PTR_ERR(folio); 2676 2677 bh = folio_buffers(folio); 2678 if (!bh) 2679 bh = create_empty_buffers(folio, blocksize, 0); 2680 2681 /* Find the buffer that contains "offset" */ 2682 offset = offset_in_folio(folio, from); 2683 pos = blocksize; 2684 while (offset >= pos) { 2685 bh = bh->b_this_page; 2686 iblock++; 2687 pos += blocksize; 2688 } 2689 2690 if (!buffer_mapped(bh)) { 2691 WARN_ON(bh->b_size != blocksize); 2692 err = get_block(inode, iblock, bh, 0); 2693 if (err) 2694 goto unlock; 2695 /* unmapped? It's a hole - nothing to do */ 2696 if (!buffer_mapped(bh)) 2697 goto unlock; 2698 } 2699 2700 /* Ok, it's mapped. Make sure it's up-to-date */ 2701 if (folio_test_uptodate(folio)) 2702 set_buffer_uptodate(bh); 2703 2704 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2705 err = bh_read(bh, 0); 2706 /* Uhhuh. Read error. Complain and punt. */ 2707 if (err < 0) 2708 goto unlock; 2709 } 2710 2711 folio_zero_range(folio, offset, length); 2712 mark_buffer_dirty(bh); 2713 2714 unlock: 2715 folio_unlock(folio); 2716 folio_put(folio); 2717 2718 return err; 2719 } 2720 EXPORT_SYMBOL(block_truncate_page); 2721 2722 /* 2723 * The generic ->writepage function for buffer-backed address_spaces 2724 */ 2725 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, 2726 void *get_block) 2727 { 2728 struct inode * const inode = folio->mapping->host; 2729 loff_t i_size = i_size_read(inode); 2730 2731 /* Is the folio fully inside i_size? */ 2732 if (folio_pos(folio) + folio_size(folio) <= i_size) 2733 return __block_write_full_folio(inode, folio, get_block, wbc); 2734 2735 /* Is the folio fully outside i_size? (truncate in progress) */ 2736 if (folio_pos(folio) >= i_size) { 2737 folio_unlock(folio); 2738 return 0; /* don't care */ 2739 } 2740 2741 /* 2742 * The folio straddles i_size. It must be zeroed out on each and every 2743 * writepage invocation because it may be mmapped. "A file is mapped 2744 * in multiples of the page size. For a file that is not a multiple of 2745 * the page size, the remaining memory is zeroed when mapped, and 2746 * writes to that region are not written out to the file." 
2747 */ 2748 folio_zero_segment(folio, offset_in_folio(folio, i_size), 2749 folio_size(folio)); 2750 return __block_write_full_folio(inode, folio, get_block, wbc); 2751 } 2752 2753 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2754 get_block_t *get_block) 2755 { 2756 struct inode *inode = mapping->host; 2757 struct buffer_head tmp = { 2758 .b_size = i_blocksize(inode), 2759 }; 2760 2761 get_block(inode, block, &tmp, 0); 2762 return tmp.b_blocknr; 2763 } 2764 EXPORT_SYMBOL(generic_block_bmap); 2765 2766 static void end_bio_bh_io_sync(struct bio *bio) 2767 { 2768 struct buffer_head *bh = bio->bi_private; 2769 2770 if (unlikely(bio_flagged(bio, BIO_QUIET))) 2771 set_bit(BH_Quiet, &bh->b_state); 2772 2773 bh->b_end_io(bh, !bio->bi_status); 2774 bio_put(bio); 2775 } 2776 2777 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 2778 enum rw_hint write_hint, 2779 struct writeback_control *wbc) 2780 { 2781 const enum req_op op = opf & REQ_OP_MASK; 2782 struct bio *bio; 2783 2784 BUG_ON(!buffer_locked(bh)); 2785 BUG_ON(!buffer_mapped(bh)); 2786 BUG_ON(!bh->b_end_io); 2787 BUG_ON(buffer_delay(bh)); 2788 BUG_ON(buffer_unwritten(bh)); 2789 2790 /* 2791 * Only clear out a write error when rewriting 2792 */ 2793 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 2794 clear_buffer_write_io_error(bh); 2795 2796 if (buffer_meta(bh)) 2797 opf |= REQ_META; 2798 if (buffer_prio(bh)) 2799 opf |= REQ_PRIO; 2800 2801 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 2802 2803 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 2804 2805 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 2806 bio->bi_write_hint = write_hint; 2807 2808 bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh)); 2809 2810 bio->bi_end_io = end_bio_bh_io_sync; 2811 bio->bi_private = bh; 2812 2813 /* Take care of bh's that straddle the end of the device */ 2814 guard_bio_eod(bio); 2815 2816 if (wbc) { 2817 wbc_init_bio(wbc, bio); 2818 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size); 2819 } 2820 2821 submit_bio(bio); 2822 } 2823 2824 void submit_bh(blk_opf_t opf, struct buffer_head *bh) 2825 { 2826 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL); 2827 } 2828 EXPORT_SYMBOL(submit_bh); 2829 2830 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2831 { 2832 lock_buffer(bh); 2833 if (!test_clear_buffer_dirty(bh)) { 2834 unlock_buffer(bh); 2835 return; 2836 } 2837 bh->b_end_io = end_buffer_write_sync; 2838 get_bh(bh); 2839 submit_bh(REQ_OP_WRITE | op_flags, bh); 2840 } 2841 EXPORT_SYMBOL(write_dirty_buffer); 2842 2843 /* 2844 * For a data-integrity writeout, we need to wait upon any in-progress I/O 2845 * and then start new I/O and then wait upon it. The caller must have a ref on 2846 * the buffer_head. 2847 */ 2848 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2849 { 2850 WARN_ON(atomic_read(&bh->b_count) < 1); 2851 lock_buffer(bh); 2852 if (test_clear_buffer_dirty(bh)) { 2853 /* 2854 * The bh should be mapped, but it might not be if the 2855 * device was hot-removed. Not much we can do but fail the I/O. 
2856 */ 2857 if (!buffer_mapped(bh)) { 2858 unlock_buffer(bh); 2859 return -EIO; 2860 } 2861 2862 get_bh(bh); 2863 bh->b_end_io = end_buffer_write_sync; 2864 submit_bh(REQ_OP_WRITE | op_flags, bh); 2865 wait_on_buffer(bh); 2866 if (!buffer_uptodate(bh)) 2867 return -EIO; 2868 } else { 2869 unlock_buffer(bh); 2870 } 2871 return 0; 2872 } 2873 EXPORT_SYMBOL(__sync_dirty_buffer); 2874 2875 int sync_dirty_buffer(struct buffer_head *bh) 2876 { 2877 return __sync_dirty_buffer(bh, REQ_SYNC); 2878 } 2879 EXPORT_SYMBOL(sync_dirty_buffer); 2880 2881 static inline int buffer_busy(struct buffer_head *bh) 2882 { 2883 return atomic_read(&bh->b_count) | 2884 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2885 } 2886 2887 static bool 2888 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 2889 { 2890 struct buffer_head *head = folio_buffers(folio); 2891 struct buffer_head *bh; 2892 2893 bh = head; 2894 do { 2895 if (buffer_busy(bh)) 2896 goto failed; 2897 bh = bh->b_this_page; 2898 } while (bh != head); 2899 2900 do { 2901 struct buffer_head *next = bh->b_this_page; 2902 2903 if (bh->b_assoc_map) 2904 __remove_assoc_queue(bh); 2905 bh = next; 2906 } while (bh != head); 2907 *buffers_to_free = head; 2908 folio_detach_private(folio); 2909 return true; 2910 failed: 2911 return false; 2912 } 2913 2914 /** 2915 * try_to_free_buffers - Release buffers attached to this folio. 2916 * @folio: The folio. 2917 * 2918 * If any buffers are in use (dirty, under writeback, elevated refcount), 2919 * no buffers will be freed. 2920 * 2921 * If the folio is dirty but all the buffers are clean then we need to 2922 * be sure to mark the folio clean as well. This is because the folio 2923 * may be against a block device, and a later reattachment of buffers 2924 * to a dirty folio will set *all* buffers dirty. Which would corrupt 2925 * filesystem data on the same device. 2926 * 2927 * The same applies to regular filesystem folios: if all the buffers are 2928 * clean then we set the folio clean and proceed. To do that, we require 2929 * total exclusion from block_dirty_folio(). That is obtained with 2930 * i_private_lock. 2931 * 2932 * Exclusion against try_to_free_buffers may be obtained by either 2933 * locking the folio or by holding its mapping's i_private_lock. 2934 * 2935 * Context: Process context. @folio must be locked. Will not sleep. 2936 * Return: true if all buffers attached to this folio were freed. 2937 */ 2938 bool try_to_free_buffers(struct folio *folio) 2939 { 2940 struct address_space * const mapping = folio->mapping; 2941 struct buffer_head *buffers_to_free = NULL; 2942 bool ret = 0; 2943 2944 BUG_ON(!folio_test_locked(folio)); 2945 if (folio_test_writeback(folio)) 2946 return false; 2947 2948 if (mapping == NULL) { /* can this still happen? */ 2949 ret = drop_buffers(folio, &buffers_to_free); 2950 goto out; 2951 } 2952 2953 spin_lock(&mapping->i_private_lock); 2954 ret = drop_buffers(folio, &buffers_to_free); 2955 2956 /* 2957 * If the filesystem writes its buffers by hand (eg ext3) 2958 * then we can have clean buffers against a dirty folio. We 2959 * clean the folio here; otherwise the VM will never notice 2960 * that the filesystem did any IO at all. 2961 * 2962 * Also, during truncate, discard_buffer will have marked all 2963 * the folio's buffers clean. We discover that here and clean 2964 * the folio also. 
2965 * 2966 * i_private_lock must be held over this entire operation in order 2967 * to synchronise against block_dirty_folio and prevent the 2968 * dirty bit from being lost. 2969 */ 2970 if (ret) 2971 folio_cancel_dirty(folio); 2972 spin_unlock(&mapping->i_private_lock); 2973 out: 2974 if (buffers_to_free) { 2975 struct buffer_head *bh = buffers_to_free; 2976 2977 do { 2978 struct buffer_head *next = bh->b_this_page; 2979 free_buffer_head(bh); 2980 bh = next; 2981 } while (bh != buffers_to_free); 2982 } 2983 return ret; 2984 } 2985 EXPORT_SYMBOL(try_to_free_buffers); 2986 2987 /* 2988 * Buffer-head allocation 2989 */ 2990 static struct kmem_cache *bh_cachep __ro_after_init; 2991 2992 /* 2993 * Once the number of bh's in the machine exceeds this level, we start 2994 * stripping them in writeback. 2995 */ 2996 static unsigned long max_buffer_heads __ro_after_init; 2997 2998 int buffer_heads_over_limit; 2999 3000 struct bh_accounting { 3001 int nr; /* Number of live bh's */ 3002 int ratelimit; /* Limit cacheline bouncing */ 3003 }; 3004 3005 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3006 3007 static void recalc_bh_state(void) 3008 { 3009 int i; 3010 int tot = 0; 3011 3012 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3013 return; 3014 __this_cpu_write(bh_accounting.ratelimit, 0); 3015 for_each_online_cpu(i) 3016 tot += per_cpu(bh_accounting, i).nr; 3017 buffer_heads_over_limit = (tot > max_buffer_heads); 3018 } 3019 3020 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3021 { 3022 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3023 if (ret) { 3024 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3025 spin_lock_init(&ret->b_uptodate_lock); 3026 preempt_disable(); 3027 __this_cpu_inc(bh_accounting.nr); 3028 recalc_bh_state(); 3029 preempt_enable(); 3030 } 3031 return ret; 3032 } 3033 EXPORT_SYMBOL(alloc_buffer_head); 3034 3035 void free_buffer_head(struct buffer_head *bh) 3036 { 3037 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3038 kmem_cache_free(bh_cachep, bh); 3039 preempt_disable(); 3040 __this_cpu_dec(bh_accounting.nr); 3041 recalc_bh_state(); 3042 preempt_enable(); 3043 } 3044 EXPORT_SYMBOL(free_buffer_head); 3045 3046 static int buffer_exit_cpu_dead(unsigned int cpu) 3047 { 3048 int i; 3049 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3050 3051 for (i = 0; i < BH_LRU_SIZE; i++) { 3052 brelse(b->bhs[i]); 3053 b->bhs[i] = NULL; 3054 } 3055 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3056 per_cpu(bh_accounting, cpu).nr = 0; 3057 return 0; 3058 } 3059 3060 /** 3061 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3062 * @bh: struct buffer_head 3063 * 3064 * Return true if the buffer is up-to-date and false, 3065 * with the buffer locked, if not. 3066 */ 3067 int bh_uptodate_or_lock(struct buffer_head *bh) 3068 { 3069 if (!buffer_uptodate(bh)) { 3070 lock_buffer(bh); 3071 if (!buffer_uptodate(bh)) 3072 return 0; 3073 unlock_buffer(bh); 3074 } 3075 return 1; 3076 } 3077 EXPORT_SYMBOL(bh_uptodate_or_lock); 3078 3079 /** 3080 * __bh_read - Submit read for a locked buffer 3081 * @bh: struct buffer_head 3082 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ 3083 * @wait: wait until reading finish 3084 * 3085 * Returns zero on success or don't wait, and -EIO on error. 
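 *
 * Illustrative sketch (not from this file): callers normally pair this
 * with bh_uptodate_or_lock(), along the lines of the bh_read() wrapper,
 * so the buffer is locked and submitted only when it is not already
 * uptodate:
 *
 *	if (!bh_uptodate_or_lock(bh))
 *		err = __bh_read(bh, 0, true);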
3086  */
3087 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3088 {
3089 	int ret = 0;
3090 
3091 	BUG_ON(!buffer_locked(bh));
3092 
3093 	get_bh(bh);
3094 	bh->b_end_io = end_buffer_read_sync;
3095 	submit_bh(REQ_OP_READ | op_flags, bh);
3096 	if (wait) {
3097 		wait_on_buffer(bh);
3098 		if (!buffer_uptodate(bh))
3099 			ret = -EIO;
3100 	}
3101 	return ret;
3102 }
3103 EXPORT_SYMBOL(__bh_read);
3104 
3105 /**
3106  * __bh_read_batch - Submit read for a batch of unlocked buffers
3107  * @nr: number of entries in the buffer batch
3108  * @bhs: a batch of struct buffer_head
3109  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3110  * @force_lock: wait for the buffer lock if set, otherwise skip any
3111  *              buffer whose lock cannot be taken immediately.
3112  *
3113  * The reads are submitted asynchronously; they are not waited for here.
3114  */
3115 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3116 		     blk_opf_t op_flags, bool force_lock)
3117 {
3118 	int i;
3119 
3120 	for (i = 0; i < nr; i++) {
3121 		struct buffer_head *bh = bhs[i];
3122 
3123 		if (buffer_uptodate(bh))
3124 			continue;
3125 
3126 		if (force_lock)
3127 			lock_buffer(bh);
3128 		else
3129 			if (!trylock_buffer(bh))
3130 				continue;
3131 
3132 		if (buffer_uptodate(bh)) {
3133 			unlock_buffer(bh);
3134 			continue;
3135 		}
3136 
3137 		bh->b_end_io = end_buffer_read_sync;
3138 		get_bh(bh);
3139 		submit_bh(REQ_OP_READ | op_flags, bh);
3140 	}
3141 }
3142 EXPORT_SYMBOL(__bh_read_batch);
3143 
3144 void __init buffer_init(void)
3145 {
3146 	unsigned long nrpages;
3147 	int ret;
3148 
3149 	bh_cachep = KMEM_CACHE(buffer_head,
3150 				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3151 	/*
3152 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3153 	 */
3154 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3155 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3156 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3157 					NULL, buffer_exit_cpu_dead);
3158 	WARN_ON(ret < 0);
3159 }
3160 
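
/*
 * Illustrative sketch (not part of this file; the myfs_* names and
 * myfs_get_block() are hypothetical): a simple buffer_head based
 * filesystem typically builds its address_space_operations from the
 * helpers above.  ->read_folio and ->bmap end up as thin wrappers that
 * supply the filesystem's get_block routine:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.bmap			= myfs_bmap,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 *
 * where myfs_write_begin() would simply call block_write_begin() with
 * myfs_get_block, analogously to the cont_write_begin() sketch earlier.
 */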