// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns if the folio has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the folio_test_dirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->i_private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.
	 * It gets dealt with elsewhere, don't buffer_error if we had some
	 * unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->i_private_lock);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against any
 * of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.
 * A common requirement for buffer-based filesystems is that certain data
 * from the backing blockdev needs to be written out for a successful fsync().
 * For example, ext2 indirect blocks need to be written back and waited upon
 * before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
					&mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
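
/*
 * Illustrative sketch (not part of this file): a simple filesystem that
 * tracks dependent metadata buffers on the inode's i_private_list via
 * mark_buffer_dirty_inode() can typically implement its ->fsync method by
 * delegating to generic_buffers_fsync().  The "examplefs" name below is
 * hypothetical; only the helpers themselves are real.
 *
 *	static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
 *				   int datasync)
 *	{
 *		// writes data, syncs the i_private_list buffers and the inode,
 *		// then issues a device cache flush
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 * Filesystems which order their own cache flush would use
 * generic_buffers_fsync_noflush() instead.
 */
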
/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;
	LIST_HEAD(tmp);

	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct address_space *mapping = bdev->bd_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned.  This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);
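
/*
 * Illustrative sketch (not part of this file): the usual pattern for
 * modifying an on-disk metadata block through the buffer cache.  The
 * "examplefs" helper below is hypothetical; sb_bread(), mark_buffer_dirty(),
 * sync_dirty_buffer() and brelse() are the real interfaces.
 *
 *	static int examplefs_update_byte(struct super_block *sb,
 *					 sector_t blocknr, size_t off, u8 val)
 *	{
 *		struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *		if (!bh)
 *			return -EIO;
 *		lock_buffer(bh);
 *		((u8 *)bh->b_data)[off] = val;
 *		unlock_buffer(bh);
 *		mark_buffer_dirty(bh);	// dirties buffer, folio and inode
 *		sync_dirty_buffer(bh);	// optional: write it out synchronously
 *		brelse(bh);
 *		return 0;
 *	}
 */
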
/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->i_private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * the refcount of buffer_head in bh_lru prevents dropping the
	 * attached page(i.e., try_to_free_buffers) so it could cause
	 * failing page migration.
	 * Skip putting upcoming bh into bh_lru until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);
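
/*
 * Illustrative sketch (not part of this file): read-ahead is typically
 * combined with a later blocking read.  The "examplefs" helper below is
 * hypothetical; __breadahead() and sb_bread() are the real interfaces.
 *
 *	static struct buffer_head *examplefs_read_first(struct super_block *sb,
 *							sector_t first, int nr)
 *	{
 *		int i;
 *
 *		// kick off async read-ahead for the blocks we expect to need
 *		for (i = 1; i < nr; i++)
 *			__breadahead(sb->s_bdev, first + i, sb->s_blocksize);
 *		// then read the first one synchronously; the others are
 *		// likely already in flight when we later sb_bread() them
 *		return sb_bread(sb, first);
 *	}
 */
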
/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function.  You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area.  Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased.  The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);
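
/*
 * Illustrative sketch (not part of this file): a buffer_head-based
 * filesystem typically wires the folio-level helpers above into its
 * address_space_operations.  The "examplefs" names below are hypothetical
 * wrappers (e.g. around block_read_full_folio() and block_write_begin());
 * block_dirty_folio(), block_invalidate_folio() and generic_write_end()
 * are the real entry points.
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= examplefs_read_folio,
 *		.write_begin		= examplefs_write_begin,
 *		.write_end		= generic_write_end,
 *	};
 */
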
/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we will free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway.  We used to use unmap_buffer() for such invalidation, but that was
 * wrong.  We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it with bread() afterwards...
 *
 * Also note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct folio_batch fbatch;
	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use folio lock instead of bd_mapping->i_private_lock
			 * to pin buffers here since we can afford to sleep and
			 * it scales better than a global spinlock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	struct buffer_head *bh;

	BUG_ON(!folio_test_locked(folio));

	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio,
				1 << READ_ONCE(inode->i_blkbits), b_state);
	return bh;
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */
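
/*
 * Illustrative sketch (not part of this file): a get_block_t callback fills
 * in the buffer state described in the table above.  For a hypothetical
 * filesystem whose file blocks are laid out contiguously starting at
 * EXAMPLEFS_DATA_START (an assumed constant, for illustration only), it
 * could be as simple as:
 *
 *	static int examplefs_get_block(struct inode *inode, sector_t iblock,
 *				       struct buffer_head *bh_result, int create)
 *	{
 *		// sets b_bdev, b_blocknr, b_size and the mapped bit
 *		map_bh(bh_result, inode->i_sb, EXAMPLEFS_DATA_START + iblock);
 *		return 0;
 *	}
 *
 * Real filesystems consult their block-mapping metadata here and, when
 * @create is set, may allocate a new block and set_buffer_new() on it.
 */
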
/*
 * While block_write_full_folio is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_folio() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_folio() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	size_t blocksize;
	int nr_underway = 0;
	blk_opf_t write_flags = wbc_to_write_flags(wbc);

	head = folio_create_buffers(folio, inode,
				    (1 << BH_Dirty) | (1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the folio stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	bh = head;
	blocksize = bh->b_size;

	block = div_u64(folio_pos(folio), blocksize);
	last_block = div_u64(i_size_read(inode) - 1, blocksize);

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this folio can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_folio()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				clean_bdev_bh_alias(bh);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the folio.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			folio_redirty_for_writepage(wbc, folio);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh,
				end_buffer_async_write);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The folio and its buffers are protected by the writeback flag,
	 * so we can drop the bh refcounts early.
	 */
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
				      inode->i_write_hint, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The folio was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * write_dirty_buffer/submit_bh.  A rare case.
		 */
		folio_end_writeback(folio);

		/*
		 * The folio and buffer_heads can be released at any time from
		 * here on.
1929 */ 1930 } 1931 return err; 1932 1933 recover: 1934 /* 1935 * ENOSPC, or some other error. We may already have added some 1936 * blocks to the file, so we need to write these out to avoid 1937 * exposing stale data. 1938 * The folio is currently locked and not marked for writeback 1939 */ 1940 bh = head; 1941 /* Recovery: lock and submit the mapped buffers */ 1942 do { 1943 if (buffer_mapped(bh) && buffer_dirty(bh) && 1944 !buffer_delay(bh)) { 1945 lock_buffer(bh); 1946 mark_buffer_async_write_endio(bh, 1947 end_buffer_async_write); 1948 } else { 1949 /* 1950 * The buffer may have been set dirty during 1951 * attachment to a dirty folio. 1952 */ 1953 clear_buffer_dirty(bh); 1954 } 1955 } while ((bh = bh->b_this_page) != head); 1956 BUG_ON(folio_test_writeback(folio)); 1957 mapping_set_error(folio->mapping, err); 1958 folio_start_writeback(folio); 1959 do { 1960 struct buffer_head *next = bh->b_this_page; 1961 if (buffer_async_write(bh)) { 1962 clear_buffer_dirty(bh); 1963 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 1964 inode->i_write_hint, wbc); 1965 nr_underway++; 1966 } 1967 bh = next; 1968 } while (bh != head); 1969 folio_unlock(folio); 1970 goto done; 1971 } 1972 EXPORT_SYMBOL(__block_write_full_folio); 1973 1974 /* 1975 * If a folio has any new buffers, zero them out here, and mark them uptodate 1976 * and dirty so they'll be written out (in order to prevent uninitialised 1977 * block data from leaking). And clear the new bit. 1978 */ 1979 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) 1980 { 1981 size_t block_start, block_end; 1982 struct buffer_head *head, *bh; 1983 1984 BUG_ON(!folio_test_locked(folio)); 1985 head = folio_buffers(folio); 1986 if (!head) 1987 return; 1988 1989 bh = head; 1990 block_start = 0; 1991 do { 1992 block_end = block_start + bh->b_size; 1993 1994 if (buffer_new(bh)) { 1995 if (block_end > from && block_start < to) { 1996 if (!folio_test_uptodate(folio)) { 1997 size_t start, xend; 1998 1999 start = max(from, block_start); 2000 xend = min(to, block_end); 2001 2002 folio_zero_segment(folio, start, xend); 2003 set_buffer_uptodate(bh); 2004 } 2005 2006 clear_buffer_new(bh); 2007 mark_buffer_dirty(bh); 2008 } 2009 } 2010 2011 block_start = block_end; 2012 bh = bh->b_this_page; 2013 } while (bh != head); 2014 } 2015 EXPORT_SYMBOL(folio_zero_new_buffers); 2016 2017 static int 2018 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 2019 const struct iomap *iomap) 2020 { 2021 loff_t offset = (loff_t)block << inode->i_blkbits; 2022 2023 bh->b_bdev = iomap->bdev; 2024 2025 /* 2026 * Block points to offset in file we need to map, iomap contains 2027 * the offset at which the map starts. If the map ends before the 2028 * current block, then do not map the buffer and let the caller 2029 * handle it. 2030 */ 2031 if (offset >= iomap->offset + iomap->length) 2032 return -EIO; 2033 2034 switch (iomap->type) { 2035 case IOMAP_HOLE: 2036 /* 2037 * If the buffer is not up to date or beyond the current EOF, 2038 * we need to mark it as new to ensure sub-block zeroing is 2039 * executed if necessary. 
2040 */ 2041 if (!buffer_uptodate(bh) || 2042 (offset >= i_size_read(inode))) 2043 set_buffer_new(bh); 2044 return 0; 2045 case IOMAP_DELALLOC: 2046 if (!buffer_uptodate(bh) || 2047 (offset >= i_size_read(inode))) 2048 set_buffer_new(bh); 2049 set_buffer_uptodate(bh); 2050 set_buffer_mapped(bh); 2051 set_buffer_delay(bh); 2052 return 0; 2053 case IOMAP_UNWRITTEN: 2054 /* 2055 * For unwritten regions, we always need to ensure that regions 2056 * in the block we are not writing to are zeroed. Mark the 2057 * buffer as new to ensure this. 2058 */ 2059 set_buffer_new(bh); 2060 set_buffer_unwritten(bh); 2061 fallthrough; 2062 case IOMAP_MAPPED: 2063 if ((iomap->flags & IOMAP_F_NEW) || 2064 offset >= i_size_read(inode)) { 2065 /* 2066 * This can happen if truncating the block device races 2067 * with the check in the caller as i_size updates on 2068 * block devices aren't synchronized by i_rwsem for 2069 * block devices. 2070 */ 2071 if (S_ISBLK(inode->i_mode)) 2072 return -EIO; 2073 set_buffer_new(bh); 2074 } 2075 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 2076 inode->i_blkbits; 2077 set_buffer_mapped(bh); 2078 return 0; 2079 default: 2080 WARN_ON_ONCE(1); 2081 return -EIO; 2082 } 2083 } 2084 2085 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 2086 get_block_t *get_block, const struct iomap *iomap) 2087 { 2088 size_t from = offset_in_folio(folio, pos); 2089 size_t to = from + len; 2090 struct inode *inode = folio->mapping->host; 2091 size_t block_start, block_end; 2092 sector_t block; 2093 int err = 0; 2094 size_t blocksize; 2095 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 2096 2097 BUG_ON(!folio_test_locked(folio)); 2098 BUG_ON(to > folio_size(folio)); 2099 BUG_ON(from > to); 2100 2101 head = folio_create_buffers(folio, inode, 0); 2102 blocksize = head->b_size; 2103 block = div_u64(folio_pos(folio), blocksize); 2104 2105 for (bh = head, block_start = 0; bh != head || !block_start; 2106 block++, block_start=block_end, bh = bh->b_this_page) { 2107 block_end = block_start + blocksize; 2108 if (block_end <= from || block_start >= to) { 2109 if (folio_test_uptodate(folio)) { 2110 if (!buffer_uptodate(bh)) 2111 set_buffer_uptodate(bh); 2112 } 2113 continue; 2114 } 2115 if (buffer_new(bh)) 2116 clear_buffer_new(bh); 2117 if (!buffer_mapped(bh)) { 2118 WARN_ON(bh->b_size != blocksize); 2119 if (get_block) 2120 err = get_block(inode, block, bh, 1); 2121 else 2122 err = iomap_to_bh(inode, block, bh, iomap); 2123 if (err) 2124 break; 2125 2126 if (buffer_new(bh)) { 2127 clean_bdev_bh_alias(bh); 2128 if (folio_test_uptodate(folio)) { 2129 clear_buffer_new(bh); 2130 set_buffer_uptodate(bh); 2131 mark_buffer_dirty(bh); 2132 continue; 2133 } 2134 if (block_end > to || block_start < from) 2135 folio_zero_segments(folio, 2136 to, block_end, 2137 block_start, from); 2138 continue; 2139 } 2140 } 2141 if (folio_test_uptodate(folio)) { 2142 if (!buffer_uptodate(bh)) 2143 set_buffer_uptodate(bh); 2144 continue; 2145 } 2146 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 2147 !buffer_unwritten(bh) && 2148 (block_start < from || block_end > to)) { 2149 bh_read_nowait(bh, 0); 2150 *wait_bh++=bh; 2151 } 2152 } 2153 /* 2154 * If we issued read requests - let them complete. 
2155 */ 2156 while(wait_bh > wait) { 2157 wait_on_buffer(*--wait_bh); 2158 if (!buffer_uptodate(*wait_bh)) 2159 err = -EIO; 2160 } 2161 if (unlikely(err)) 2162 folio_zero_new_buffers(folio, from, to); 2163 return err; 2164 } 2165 2166 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, 2167 get_block_t *get_block) 2168 { 2169 return __block_write_begin_int(folio, pos, len, get_block, NULL); 2170 } 2171 EXPORT_SYMBOL(__block_write_begin); 2172 2173 static void __block_commit_write(struct folio *folio, size_t from, size_t to) 2174 { 2175 size_t block_start, block_end; 2176 bool partial = false; 2177 unsigned blocksize; 2178 struct buffer_head *bh, *head; 2179 2180 bh = head = folio_buffers(folio); 2181 if (!bh) 2182 return; 2183 blocksize = bh->b_size; 2184 2185 block_start = 0; 2186 do { 2187 block_end = block_start + blocksize; 2188 if (block_end <= from || block_start >= to) { 2189 if (!buffer_uptodate(bh)) 2190 partial = true; 2191 } else { 2192 set_buffer_uptodate(bh); 2193 mark_buffer_dirty(bh); 2194 } 2195 if (buffer_new(bh)) 2196 clear_buffer_new(bh); 2197 2198 block_start = block_end; 2199 bh = bh->b_this_page; 2200 } while (bh != head); 2201 2202 /* 2203 * If this is a partial write which happened to make all buffers 2204 * uptodate then we can optimize away a bogus read_folio() for 2205 * the next read(). Here we 'discover' whether the folio went 2206 * uptodate as a result of this (potentially partial) write. 2207 */ 2208 if (!partial) 2209 folio_mark_uptodate(folio); 2210 } 2211 2212 /* 2213 * block_write_begin takes care of the basic task of block allocation and 2214 * bringing partial write blocks uptodate first. 2215 * 2216 * The filesystem needs to handle block truncation upon failure. 2217 */ 2218 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2219 struct folio **foliop, get_block_t *get_block) 2220 { 2221 pgoff_t index = pos >> PAGE_SHIFT; 2222 struct folio *folio; 2223 int status; 2224 2225 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2226 mapping_gfp_mask(mapping)); 2227 if (IS_ERR(folio)) 2228 return PTR_ERR(folio); 2229 2230 status = __block_write_begin_int(folio, pos, len, get_block, NULL); 2231 if (unlikely(status)) { 2232 folio_unlock(folio); 2233 folio_put(folio); 2234 folio = NULL; 2235 } 2236 2237 *foliop = folio; 2238 return status; 2239 } 2240 EXPORT_SYMBOL(block_write_begin); 2241 2242 int block_write_end(struct file *file, struct address_space *mapping, 2243 loff_t pos, unsigned len, unsigned copied, 2244 struct folio *folio, void *fsdata) 2245 { 2246 size_t start = pos - folio_pos(folio); 2247 2248 if (unlikely(copied < len)) { 2249 /* 2250 * The buffers that were written will now be uptodate, so 2251 * we don't have to worry about a read_folio reading them 2252 * and overwriting a partial write. However if we have 2253 * encountered a short write and only partially written 2254 * into a buffer, it will not be marked uptodate, so a 2255 * read_folio might come in and destroy our partial write. 2256 * 2257 * Do the simplest thing, and just treat any short write to a 2258 * non uptodate folio as a zero-length write, and force the 2259 * caller to redo the whole thing. 
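		 *
		 * (The generic write path copes with that: when ->write_end()
		 * returns zero after a short copy, generic_perform_write()
		 * reverts the iov_iter and retries the copy of that range.)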
2260 */ 2261 if (!folio_test_uptodate(folio)) 2262 copied = 0; 2263 2264 folio_zero_new_buffers(folio, start+copied, start+len); 2265 } 2266 flush_dcache_folio(folio); 2267 2268 /* This could be a short (even 0-length) commit */ 2269 __block_commit_write(folio, start, start + copied); 2270 2271 return copied; 2272 } 2273 EXPORT_SYMBOL(block_write_end); 2274 2275 int generic_write_end(struct file *file, struct address_space *mapping, 2276 loff_t pos, unsigned len, unsigned copied, 2277 struct folio *folio, void *fsdata) 2278 { 2279 struct inode *inode = mapping->host; 2280 loff_t old_size = inode->i_size; 2281 bool i_size_changed = false; 2282 2283 copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata); 2284 2285 /* 2286 * No need to use i_size_read() here, the i_size cannot change under us 2287 * because we hold i_rwsem. 2288 * 2289 * But it's important to update i_size while still holding folio lock: 2290 * page writeout could otherwise come in and zero beyond i_size. 2291 */ 2292 if (pos + copied > inode->i_size) { 2293 i_size_write(inode, pos + copied); 2294 i_size_changed = true; 2295 } 2296 2297 folio_unlock(folio); 2298 folio_put(folio); 2299 2300 if (old_size < pos) 2301 pagecache_isize_extended(inode, old_size, pos); 2302 /* 2303 * Don't mark the inode dirty under page lock. First, it unnecessarily 2304 * makes the holding time of page lock longer. Second, it forces lock 2305 * ordering of page lock and transaction start for journaling 2306 * filesystems. 2307 */ 2308 if (i_size_changed) 2309 mark_inode_dirty(inode); 2310 return copied; 2311 } 2312 EXPORT_SYMBOL(generic_write_end); 2313 2314 /* 2315 * block_is_partially_uptodate checks whether buffers within a folio are 2316 * uptodate or not. 2317 * 2318 * Returns true if all buffers which correspond to the specified part 2319 * of the folio are uptodate. 2320 */ 2321 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 2322 { 2323 unsigned block_start, block_end, blocksize; 2324 unsigned to; 2325 struct buffer_head *bh, *head; 2326 bool ret = true; 2327 2328 head = folio_buffers(folio); 2329 if (!head) 2330 return false; 2331 blocksize = head->b_size; 2332 to = min_t(unsigned, folio_size(folio) - from, count); 2333 to = from + to; 2334 if (from < blocksize && to > folio_size(folio) - blocksize) 2335 return false; 2336 2337 bh = head; 2338 block_start = 0; 2339 do { 2340 block_end = block_start + blocksize; 2341 if (block_end > from && block_start < to) { 2342 if (!buffer_uptodate(bh)) { 2343 ret = false; 2344 break; 2345 } 2346 if (block_end >= to) 2347 break; 2348 } 2349 block_start = block_end; 2350 bh = bh->b_this_page; 2351 } while (bh != head); 2352 2353 return ret; 2354 } 2355 EXPORT_SYMBOL(block_is_partially_uptodate); 2356 2357 /* 2358 * Generic "read_folio" function for block devices that have the normal 2359 * get_block functionality. This is most of the block device filesystems. 2360 * Reads the folio asynchronously --- the unlock_buffer() and 2361 * set/clear_buffer_uptodate() functions propagate buffer state into the 2362 * folio once IO has completed. 2363 */ 2364 int block_read_full_folio(struct folio *folio, get_block_t *get_block) 2365 { 2366 struct inode *inode = folio->mapping->host; 2367 sector_t iblock, lblock; 2368 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2369 size_t blocksize; 2370 int nr, i; 2371 int fully_mapped = 1; 2372 bool page_error = false; 2373 loff_t limit = i_size_read(inode); 2374 2375 /* This is needed for ext4. 
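	 * ext4 stores fs-verity metadata (the Merkle tree) past i_size and
	 * reads it through the page cache, so for verity inodes the read
	 * must not be cut off at EOF.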
*/ 2376 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 2377 limit = inode->i_sb->s_maxbytes; 2378 2379 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 2380 2381 head = folio_create_buffers(folio, inode, 0); 2382 blocksize = head->b_size; 2383 2384 iblock = div_u64(folio_pos(folio), blocksize); 2385 lblock = div_u64(limit + blocksize - 1, blocksize); 2386 bh = head; 2387 nr = 0; 2388 i = 0; 2389 2390 do { 2391 if (buffer_uptodate(bh)) 2392 continue; 2393 2394 if (!buffer_mapped(bh)) { 2395 int err = 0; 2396 2397 fully_mapped = 0; 2398 if (iblock < lblock) { 2399 WARN_ON(bh->b_size != blocksize); 2400 err = get_block(inode, iblock, bh, 0); 2401 if (err) 2402 page_error = true; 2403 } 2404 if (!buffer_mapped(bh)) { 2405 folio_zero_range(folio, i * blocksize, 2406 blocksize); 2407 if (!err) 2408 set_buffer_uptodate(bh); 2409 continue; 2410 } 2411 /* 2412 * get_block() might have updated the buffer 2413 * synchronously 2414 */ 2415 if (buffer_uptodate(bh)) 2416 continue; 2417 } 2418 arr[nr++] = bh; 2419 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2420 2421 if (fully_mapped) 2422 folio_set_mappedtodisk(folio); 2423 2424 if (!nr) { 2425 /* 2426 * All buffers are uptodate or get_block() returned an 2427 * error when trying to map them - we can finish the read. 2428 */ 2429 folio_end_read(folio, !page_error); 2430 return 0; 2431 } 2432 2433 /* Stage two: lock the buffers */ 2434 for (i = 0; i < nr; i++) { 2435 bh = arr[i]; 2436 lock_buffer(bh); 2437 mark_buffer_async_read(bh); 2438 } 2439 2440 /* 2441 * Stage 3: start the IO. Check for uptodateness 2442 * inside the buffer lock in case another process reading 2443 * the underlying blockdev brought it uptodate (the sct fix). 2444 */ 2445 for (i = 0; i < nr; i++) { 2446 bh = arr[i]; 2447 if (buffer_uptodate(bh)) 2448 end_buffer_async_read(bh, 1); 2449 else 2450 submit_bh(REQ_OP_READ, bh); 2451 } 2452 return 0; 2453 } 2454 EXPORT_SYMBOL(block_read_full_folio); 2455 2456 /* utility function for filesystems that need to do work on expanding 2457 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2458 * deal with the hole. 
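 *
 * A typical caller is a filesystem's ->setattr() when the new size is
 * larger than the old one; a hypothetical sketch:
 *
 *	if (attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}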
2459 */ 2460 int generic_cont_expand_simple(struct inode *inode, loff_t size) 2461 { 2462 struct address_space *mapping = inode->i_mapping; 2463 const struct address_space_operations *aops = mapping->a_ops; 2464 struct folio *folio; 2465 void *fsdata = NULL; 2466 int err; 2467 2468 err = inode_newsize_ok(inode, size); 2469 if (err) 2470 goto out; 2471 2472 err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata); 2473 if (err) 2474 goto out; 2475 2476 err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata); 2477 BUG_ON(err > 0); 2478 2479 out: 2480 return err; 2481 } 2482 EXPORT_SYMBOL(generic_cont_expand_simple); 2483 2484 static int cont_expand_zero(struct file *file, struct address_space *mapping, 2485 loff_t pos, loff_t *bytes) 2486 { 2487 struct inode *inode = mapping->host; 2488 const struct address_space_operations *aops = mapping->a_ops; 2489 unsigned int blocksize = i_blocksize(inode); 2490 struct folio *folio; 2491 void *fsdata = NULL; 2492 pgoff_t index, curidx; 2493 loff_t curpos; 2494 unsigned zerofrom, offset, len; 2495 int err = 0; 2496 2497 index = pos >> PAGE_SHIFT; 2498 offset = pos & ~PAGE_MASK; 2499 2500 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 2501 zerofrom = curpos & ~PAGE_MASK; 2502 if (zerofrom & (blocksize-1)) { 2503 *bytes |= (blocksize-1); 2504 (*bytes)++; 2505 } 2506 len = PAGE_SIZE - zerofrom; 2507 2508 err = aops->write_begin(file, mapping, curpos, len, 2509 &folio, &fsdata); 2510 if (err) 2511 goto out; 2512 folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2513 err = aops->write_end(file, mapping, curpos, len, len, 2514 folio, fsdata); 2515 if (err < 0) 2516 goto out; 2517 BUG_ON(err != len); 2518 err = 0; 2519 2520 balance_dirty_pages_ratelimited(mapping); 2521 2522 if (fatal_signal_pending(current)) { 2523 err = -EINTR; 2524 goto out; 2525 } 2526 } 2527 2528 /* page covers the boundary, find the boundary offset */ 2529 if (index == curidx) { 2530 zerofrom = curpos & ~PAGE_MASK; 2531 /* if we will expand the thing last block will be filled */ 2532 if (offset <= zerofrom) { 2533 goto out; 2534 } 2535 if (zerofrom & (blocksize-1)) { 2536 *bytes |= (blocksize-1); 2537 (*bytes)++; 2538 } 2539 len = offset - zerofrom; 2540 2541 err = aops->write_begin(file, mapping, curpos, len, 2542 &folio, &fsdata); 2543 if (err) 2544 goto out; 2545 folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2546 err = aops->write_end(file, mapping, curpos, len, len, 2547 folio, fsdata); 2548 if (err < 0) 2549 goto out; 2550 BUG_ON(err != len); 2551 err = 0; 2552 } 2553 out: 2554 return err; 2555 } 2556 2557 /* 2558 * For moronic filesystems that do not allow holes in file. 2559 * We may have to extend the file. 
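 *
 * A minimal sketch of wiring this up as ->write_begin() (the myfs_* and
 * MYFS_I() names are hypothetical; fat does much the same with its
 * mmu_private field):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, foliop,
 *					fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->mmu_private);
 *	}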
2560 */ 2561 int cont_write_begin(struct file *file, struct address_space *mapping, 2562 loff_t pos, unsigned len, 2563 struct folio **foliop, void **fsdata, 2564 get_block_t *get_block, loff_t *bytes) 2565 { 2566 struct inode *inode = mapping->host; 2567 unsigned int blocksize = i_blocksize(inode); 2568 unsigned int zerofrom; 2569 int err; 2570 2571 err = cont_expand_zero(file, mapping, pos, bytes); 2572 if (err) 2573 return err; 2574 2575 zerofrom = *bytes & ~PAGE_MASK; 2576 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2577 *bytes |= (blocksize-1); 2578 (*bytes)++; 2579 } 2580 2581 return block_write_begin(mapping, pos, len, foliop, get_block); 2582 } 2583 EXPORT_SYMBOL(cont_write_begin); 2584 2585 void block_commit_write(struct page *page, unsigned from, unsigned to) 2586 { 2587 struct folio *folio = page_folio(page); 2588 __block_commit_write(folio, from, to); 2589 } 2590 EXPORT_SYMBOL(block_commit_write); 2591 2592 /* 2593 * block_page_mkwrite() is not allowed to change the file size as it gets 2594 * called from a page fault handler when a page is first dirtied. Hence we must 2595 * be careful to check for EOF conditions here. We set the page up correctly 2596 * for a written page which means we get ENOSPC checking when writing into 2597 * holes and correct delalloc and unwritten extent mapping on filesystems that 2598 * support these features. 2599 * 2600 * We are not allowed to take the i_mutex here so we have to play games to 2601 * protect against truncate races as the page could now be beyond EOF. Because 2602 * truncate writes the inode size before removing pages, once we have the 2603 * page lock we can determine safely if the page is beyond EOF. If it is not 2604 * beyond EOF, then the page is guaranteed safe against truncation until we 2605 * unlock the page. 2606 * 2607 * Direct callers of this function should protect against filesystem freezing 2608 * using sb_start_pagefault() - sb_end_pagefault() functions. 2609 */ 2610 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2611 get_block_t get_block) 2612 { 2613 struct folio *folio = page_folio(vmf->page); 2614 struct inode *inode = file_inode(vma->vm_file); 2615 unsigned long end; 2616 loff_t size; 2617 int ret; 2618 2619 folio_lock(folio); 2620 size = i_size_read(inode); 2621 if ((folio->mapping != inode->i_mapping) || 2622 (folio_pos(folio) >= size)) { 2623 /* We overload EFAULT to mean page got truncated */ 2624 ret = -EFAULT; 2625 goto out_unlock; 2626 } 2627 2628 end = folio_size(folio); 2629 /* folio is wholly or partially inside EOF */ 2630 if (folio_pos(folio) + end > size) 2631 end = size - folio_pos(folio); 2632 2633 ret = __block_write_begin_int(folio, 0, end, get_block, NULL); 2634 if (unlikely(ret)) 2635 goto out_unlock; 2636 2637 __block_commit_write(folio, 0, end); 2638 2639 folio_mark_dirty(folio); 2640 folio_wait_stable(folio); 2641 return 0; 2642 out_unlock: 2643 folio_unlock(folio); 2644 return ret; 2645 } 2646 EXPORT_SYMBOL(block_page_mkwrite); 2647 2648 int block_truncate_page(struct address_space *mapping, 2649 loff_t from, get_block_t *get_block) 2650 { 2651 pgoff_t index = from >> PAGE_SHIFT; 2652 unsigned blocksize; 2653 sector_t iblock; 2654 size_t offset, length, pos; 2655 struct inode *inode = mapping->host; 2656 struct folio *folio; 2657 struct buffer_head *bh; 2658 int err = 0; 2659 2660 blocksize = i_blocksize(inode); 2661 length = from & (blocksize - 1); 2662 2663 /* Block boundary? 
Nothing to do */ 2664 if (!length) 2665 return 0; 2666 2667 length = blocksize - length; 2668 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits; 2669 2670 folio = filemap_grab_folio(mapping, index); 2671 if (IS_ERR(folio)) 2672 return PTR_ERR(folio); 2673 2674 bh = folio_buffers(folio); 2675 if (!bh) 2676 bh = create_empty_buffers(folio, blocksize, 0); 2677 2678 /* Find the buffer that contains "offset" */ 2679 offset = offset_in_folio(folio, from); 2680 pos = blocksize; 2681 while (offset >= pos) { 2682 bh = bh->b_this_page; 2683 iblock++; 2684 pos += blocksize; 2685 } 2686 2687 if (!buffer_mapped(bh)) { 2688 WARN_ON(bh->b_size != blocksize); 2689 err = get_block(inode, iblock, bh, 0); 2690 if (err) 2691 goto unlock; 2692 /* unmapped? It's a hole - nothing to do */ 2693 if (!buffer_mapped(bh)) 2694 goto unlock; 2695 } 2696 2697 /* Ok, it's mapped. Make sure it's up-to-date */ 2698 if (folio_test_uptodate(folio)) 2699 set_buffer_uptodate(bh); 2700 2701 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2702 err = bh_read(bh, 0); 2703 /* Uhhuh. Read error. Complain and punt. */ 2704 if (err < 0) 2705 goto unlock; 2706 } 2707 2708 folio_zero_range(folio, offset, length); 2709 mark_buffer_dirty(bh); 2710 2711 unlock: 2712 folio_unlock(folio); 2713 folio_put(folio); 2714 2715 return err; 2716 } 2717 EXPORT_SYMBOL(block_truncate_page); 2718 2719 /* 2720 * The generic ->writepage function for buffer-backed address_spaces 2721 */ 2722 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, 2723 void *get_block) 2724 { 2725 struct inode * const inode = folio->mapping->host; 2726 loff_t i_size = i_size_read(inode); 2727 2728 /* Is the folio fully inside i_size? */ 2729 if (folio_pos(folio) + folio_size(folio) <= i_size) 2730 return __block_write_full_folio(inode, folio, get_block, wbc); 2731 2732 /* Is the folio fully outside i_size? (truncate in progress) */ 2733 if (folio_pos(folio) >= i_size) { 2734 folio_unlock(folio); 2735 return 0; /* don't care */ 2736 } 2737 2738 /* 2739 * The folio straddles i_size. It must be zeroed out on each and every 2740 * writepage invocation because it may be mmapped. "A file is mapped 2741 * in multiples of the page size. For a file that is not a multiple of 2742 * the page size, the remaining memory is zeroed when mapped, and 2743 * writes to that region are not written out to the file." 
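	 * (The quoted sentences are from the mmap(2) man page.)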
2744 */ 2745 folio_zero_segment(folio, offset_in_folio(folio, i_size), 2746 folio_size(folio)); 2747 return __block_write_full_folio(inode, folio, get_block, wbc); 2748 } 2749 2750 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2751 get_block_t *get_block) 2752 { 2753 struct inode *inode = mapping->host; 2754 struct buffer_head tmp = { 2755 .b_size = i_blocksize(inode), 2756 }; 2757 2758 get_block(inode, block, &tmp, 0); 2759 return tmp.b_blocknr; 2760 } 2761 EXPORT_SYMBOL(generic_block_bmap); 2762 2763 static void end_bio_bh_io_sync(struct bio *bio) 2764 { 2765 struct buffer_head *bh = bio->bi_private; 2766 2767 if (unlikely(bio_flagged(bio, BIO_QUIET))) 2768 set_bit(BH_Quiet, &bh->b_state); 2769 2770 bh->b_end_io(bh, !bio->bi_status); 2771 bio_put(bio); 2772 } 2773 2774 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 2775 enum rw_hint write_hint, 2776 struct writeback_control *wbc) 2777 { 2778 const enum req_op op = opf & REQ_OP_MASK; 2779 struct bio *bio; 2780 2781 BUG_ON(!buffer_locked(bh)); 2782 BUG_ON(!buffer_mapped(bh)); 2783 BUG_ON(!bh->b_end_io); 2784 BUG_ON(buffer_delay(bh)); 2785 BUG_ON(buffer_unwritten(bh)); 2786 2787 /* 2788 * Only clear out a write error when rewriting 2789 */ 2790 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 2791 clear_buffer_write_io_error(bh); 2792 2793 if (buffer_meta(bh)) 2794 opf |= REQ_META; 2795 if (buffer_prio(bh)) 2796 opf |= REQ_PRIO; 2797 2798 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 2799 2800 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 2801 2802 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 2803 bio->bi_write_hint = write_hint; 2804 2805 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 2806 2807 bio->bi_end_io = end_bio_bh_io_sync; 2808 bio->bi_private = bh; 2809 2810 /* Take care of bh's that straddle the end of the device */ 2811 guard_bio_eod(bio); 2812 2813 if (wbc) { 2814 wbc_init_bio(wbc, bio); 2815 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); 2816 } 2817 2818 submit_bio(bio); 2819 } 2820 2821 void submit_bh(blk_opf_t opf, struct buffer_head *bh) 2822 { 2823 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL); 2824 } 2825 EXPORT_SYMBOL(submit_bh); 2826 2827 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2828 { 2829 lock_buffer(bh); 2830 if (!test_clear_buffer_dirty(bh)) { 2831 unlock_buffer(bh); 2832 return; 2833 } 2834 bh->b_end_io = end_buffer_write_sync; 2835 get_bh(bh); 2836 submit_bh(REQ_OP_WRITE | op_flags, bh); 2837 } 2838 EXPORT_SYMBOL(write_dirty_buffer); 2839 2840 /* 2841 * For a data-integrity writeout, we need to wait upon any in-progress I/O 2842 * and then start new I/O and then wait upon it. The caller must have a ref on 2843 * the buffer_head. 2844 */ 2845 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2846 { 2847 WARN_ON(atomic_read(&bh->b_count) < 1); 2848 lock_buffer(bh); 2849 if (test_clear_buffer_dirty(bh)) { 2850 /* 2851 * The bh should be mapped, but it might not be if the 2852 * device was hot-removed. Not much we can do but fail the I/O. 
2853 */ 2854 if (!buffer_mapped(bh)) { 2855 unlock_buffer(bh); 2856 return -EIO; 2857 } 2858 2859 get_bh(bh); 2860 bh->b_end_io = end_buffer_write_sync; 2861 submit_bh(REQ_OP_WRITE | op_flags, bh); 2862 wait_on_buffer(bh); 2863 if (!buffer_uptodate(bh)) 2864 return -EIO; 2865 } else { 2866 unlock_buffer(bh); 2867 } 2868 return 0; 2869 } 2870 EXPORT_SYMBOL(__sync_dirty_buffer); 2871 2872 int sync_dirty_buffer(struct buffer_head *bh) 2873 { 2874 return __sync_dirty_buffer(bh, REQ_SYNC); 2875 } 2876 EXPORT_SYMBOL(sync_dirty_buffer); 2877 2878 static inline int buffer_busy(struct buffer_head *bh) 2879 { 2880 return atomic_read(&bh->b_count) | 2881 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2882 } 2883 2884 static bool 2885 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 2886 { 2887 struct buffer_head *head = folio_buffers(folio); 2888 struct buffer_head *bh; 2889 2890 bh = head; 2891 do { 2892 if (buffer_busy(bh)) 2893 goto failed; 2894 bh = bh->b_this_page; 2895 } while (bh != head); 2896 2897 do { 2898 struct buffer_head *next = bh->b_this_page; 2899 2900 if (bh->b_assoc_map) 2901 __remove_assoc_queue(bh); 2902 bh = next; 2903 } while (bh != head); 2904 *buffers_to_free = head; 2905 folio_detach_private(folio); 2906 return true; 2907 failed: 2908 return false; 2909 } 2910 2911 /** 2912 * try_to_free_buffers - Release buffers attached to this folio. 2913 * @folio: The folio. 2914 * 2915 * If any buffers are in use (dirty, under writeback, elevated refcount), 2916 * no buffers will be freed. 2917 * 2918 * If the folio is dirty but all the buffers are clean then we need to 2919 * be sure to mark the folio clean as well. This is because the folio 2920 * may be against a block device, and a later reattachment of buffers 2921 * to a dirty folio will set *all* buffers dirty. Which would corrupt 2922 * filesystem data on the same device. 2923 * 2924 * The same applies to regular filesystem folios: if all the buffers are 2925 * clean then we set the folio clean and proceed. To do that, we require 2926 * total exclusion from block_dirty_folio(). That is obtained with 2927 * i_private_lock. 2928 * 2929 * Exclusion against try_to_free_buffers may be obtained by either 2930 * locking the folio or by holding its mapping's i_private_lock. 2931 * 2932 * Context: Process context. @folio must be locked. Will not sleep. 2933 * Return: true if all buffers attached to this folio were freed. 2934 */ 2935 bool try_to_free_buffers(struct folio *folio) 2936 { 2937 struct address_space * const mapping = folio->mapping; 2938 struct buffer_head *buffers_to_free = NULL; 2939 bool ret = 0; 2940 2941 BUG_ON(!folio_test_locked(folio)); 2942 if (folio_test_writeback(folio)) 2943 return false; 2944 2945 if (mapping == NULL) { /* can this still happen? */ 2946 ret = drop_buffers(folio, &buffers_to_free); 2947 goto out; 2948 } 2949 2950 spin_lock(&mapping->i_private_lock); 2951 ret = drop_buffers(folio, &buffers_to_free); 2952 2953 /* 2954 * If the filesystem writes its buffers by hand (eg ext3) 2955 * then we can have clean buffers against a dirty folio. We 2956 * clean the folio here; otherwise the VM will never notice 2957 * that the filesystem did any IO at all. 2958 * 2959 * Also, during truncate, discard_buffer will have marked all 2960 * the folio's buffers clean. We discover that here and clean 2961 * the folio also. 
2962 * 2963 * i_private_lock must be held over this entire operation in order 2964 * to synchronise against block_dirty_folio and prevent the 2965 * dirty bit from being lost. 2966 */ 2967 if (ret) 2968 folio_cancel_dirty(folio); 2969 spin_unlock(&mapping->i_private_lock); 2970 out: 2971 if (buffers_to_free) { 2972 struct buffer_head *bh = buffers_to_free; 2973 2974 do { 2975 struct buffer_head *next = bh->b_this_page; 2976 free_buffer_head(bh); 2977 bh = next; 2978 } while (bh != buffers_to_free); 2979 } 2980 return ret; 2981 } 2982 EXPORT_SYMBOL(try_to_free_buffers); 2983 2984 /* 2985 * Buffer-head allocation 2986 */ 2987 static struct kmem_cache *bh_cachep __ro_after_init; 2988 2989 /* 2990 * Once the number of bh's in the machine exceeds this level, we start 2991 * stripping them in writeback. 2992 */ 2993 static unsigned long max_buffer_heads __ro_after_init; 2994 2995 int buffer_heads_over_limit; 2996 2997 struct bh_accounting { 2998 int nr; /* Number of live bh's */ 2999 int ratelimit; /* Limit cacheline bouncing */ 3000 }; 3001 3002 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3003 3004 static void recalc_bh_state(void) 3005 { 3006 int i; 3007 int tot = 0; 3008 3009 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3010 return; 3011 __this_cpu_write(bh_accounting.ratelimit, 0); 3012 for_each_online_cpu(i) 3013 tot += per_cpu(bh_accounting, i).nr; 3014 buffer_heads_over_limit = (tot > max_buffer_heads); 3015 } 3016 3017 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3018 { 3019 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3020 if (ret) { 3021 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3022 spin_lock_init(&ret->b_uptodate_lock); 3023 preempt_disable(); 3024 __this_cpu_inc(bh_accounting.nr); 3025 recalc_bh_state(); 3026 preempt_enable(); 3027 } 3028 return ret; 3029 } 3030 EXPORT_SYMBOL(alloc_buffer_head); 3031 3032 void free_buffer_head(struct buffer_head *bh) 3033 { 3034 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3035 kmem_cache_free(bh_cachep, bh); 3036 preempt_disable(); 3037 __this_cpu_dec(bh_accounting.nr); 3038 recalc_bh_state(); 3039 preempt_enable(); 3040 } 3041 EXPORT_SYMBOL(free_buffer_head); 3042 3043 static int buffer_exit_cpu_dead(unsigned int cpu) 3044 { 3045 int i; 3046 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3047 3048 for (i = 0; i < BH_LRU_SIZE; i++) { 3049 brelse(b->bhs[i]); 3050 b->bhs[i] = NULL; 3051 } 3052 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3053 per_cpu(bh_accounting, cpu).nr = 0; 3054 return 0; 3055 } 3056 3057 /** 3058 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3059 * @bh: struct buffer_head 3060 * 3061 * Return true if the buffer is up-to-date and false, 3062 * with the buffer locked, if not. 3063 */ 3064 int bh_uptodate_or_lock(struct buffer_head *bh) 3065 { 3066 if (!buffer_uptodate(bh)) { 3067 lock_buffer(bh); 3068 if (!buffer_uptodate(bh)) 3069 return 0; 3070 unlock_buffer(bh); 3071 } 3072 return 1; 3073 } 3074 EXPORT_SYMBOL(bh_uptodate_or_lock); 3075 3076 /** 3077 * __bh_read - Submit read for a locked buffer 3078 * @bh: struct buffer_head 3079 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ 3080 * @wait: wait until reading finish 3081 * 3082 * Returns zero on success or don't wait, and -EIO on error. 
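 * (With @wait set, zero means the read completed and the buffer is
 * uptodate; without @wait, zero only means the read was submitted and
 * completion is reported through the buffer's end_io handler.)
 *
 * Most callers use the bh_read()/bh_read_nowait() wrappers from
 * <linux/buffer_head.h>, which take care of the locking; roughly:
 *
 *	if (!bh_uptodate_or_lock(bh))
 *		err = __bh_read(bh, 0, true);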
3083  */
3084 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3085 {
3086 	int ret = 0;
3087
3088 	BUG_ON(!buffer_locked(bh));
3089
3090 	get_bh(bh);
3091 	bh->b_end_io = end_buffer_read_sync;
3092 	submit_bh(REQ_OP_READ | op_flags, bh);
3093 	if (wait) {
3094 		wait_on_buffer(bh);
3095 		if (!buffer_uptodate(bh))
3096 			ret = -EIO;
3097 	}
3098 	return ret;
3099 }
3100 EXPORT_SYMBOL(__bh_read);
3101
3102 /**
3103  * __bh_read_batch - Submit reads for a batch of unlocked buffers
3104  * @nr: number of buffers in the batch
3105  * @bhs: a batch of struct buffer_head
3106  * @op_flags: additional REQ_* flags to OR in besides REQ_OP_READ
3107  * @force_lock: if set, wait for the buffer lock on each buffer; otherwise
3108  * skip any buffer that cannot be locked without blocking.
3109  *
3110  * Reads complete asynchronously; the result is reported through each buffer's end_io handler.
3111  */
3112 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3113 		     blk_opf_t op_flags, bool force_lock)
3114 {
3115 	int i;
3116
3117 	for (i = 0; i < nr; i++) {
3118 		struct buffer_head *bh = bhs[i];
3119
3120 		if (buffer_uptodate(bh))
3121 			continue;
3122
3123 		if (force_lock)
3124 			lock_buffer(bh);
3125 		else
3126 			if (!trylock_buffer(bh))
3127 				continue;
3128
3129 		if (buffer_uptodate(bh)) {
3130 			unlock_buffer(bh);
3131 			continue;
3132 		}
3133
3134 		bh->b_end_io = end_buffer_read_sync;
3135 		get_bh(bh);
3136 		submit_bh(REQ_OP_READ | op_flags, bh);
3137 	}
3138 }
3139 EXPORT_SYMBOL(__bh_read_batch);
3140
3141 void __init buffer_init(void)
3142 {
3143 	unsigned long nrpages;
3144 	int ret;
3145
3146 	bh_cachep = KMEM_CACHE(buffer_head,
3147 				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3148 	/*
3149 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3150 	 */
3151 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3152 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3153 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3154 					NULL, buffer_exit_cpu_dead);
3155 	WARN_ON(ret < 0);
3156 }
3157