// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
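 *
 * A minimal sketch of the usual read-and-wait pattern (this is essentially
 * what __bread_slow() below does), assuming the caller already holds a
 * reference to the buffer_head:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);
 *	} else {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(REQ_OP_READ, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			...	(the read failed)
 *	}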
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
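 *
 * As a rough sketch (not a complete filesystem), the usual flow is to
 * associate each such metadata buffer with the inode as it is dirtied,
 * and then to write out and wait on the whole list at fsync() time,
 * using the helpers defined later in this file:
 *
 *	mark_buffer_dirty_inode(bh, inode);	(while modifying metadata)
 *	...
 *	err = sync_mapping_buffers(inode->i_mapping);	(from ->fsync())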
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
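 *
 * A rough sketch of that pattern (error handling omitted), where "lock"
 * is the backing address_space's private_lock and "list" is its
 * private_list:
 *
 *	mark_buffer_dirty(bh);
 *	write_dirty_buffer(bh, 0);		(queue the write)
 *	...
 *	err = osync_buffers_list(lock, list);	(wait for completion)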
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
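 *
 * As a rough illustration (the "myfs" name is hypothetical), a filesystem
 * would typically call this from its ->fsync method:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}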
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	if (retry)
		gfp |= __GFP_NOFAIL;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;

	folio = __filemap_get_folio(inode->i_mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev,
					(sector_t)index << sizebits, size);
			goto done;
		}
		if (!try_to_free_buffers(folio))
			goto failed;
	}

	ret = -ENOMEM;
	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto failed;

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev,
			(sector_t)index << sizebits, size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = PAGE_SHIFT - __ffs(size);
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with the proper size buffers.. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPUs' LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached page (i.e., try_to_free_buffers), which can cause page
	 * migration to fail.
	 * Skip putting the upcoming bh into the bh_lru until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
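 *
 * A minimal usage sketch (the gfp flags and error handling here are
 * illustrative only): look the block up, read it in if it is not already
 * uptodate, and drop the reference when done:
 *
 *	bh = bdev_getblk(bdev, block, size, GFP_NOWAIT | __GFP_MOVABLE);
 *	if (bh) {
 *		if (!buffer_uptodate(bh))
 *			bh_read(bh, 0);
 *		...
 *		brelse(bh);
 *	}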
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread_gfp() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 * @gfp: page allocation flag
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * The page cache can be allocated from the non-movable area
 * so as not to hinder page migration, if you set @gfp to zero.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
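 *
 * A minimal sketch of the usual call site (mirroring folio_create_buffers()
 * below): attach buffers sized to the inode's block size if none are
 * attached yet, then walk them:
 *
 *	if (!folio_buffers(folio))
 *		folio_create_empty_buffers(folio,
 *				1 << READ_ONCE(inode->i_blkbits), 0);
 *	head = folio_buffers(folio);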
 */
void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
				unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->private_lock);
}
EXPORT_SYMBOL(folio_create_empty_buffers);

void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
}
EXPORT_SYMBOL(create_empty_buffers);

/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function and until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we will free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway.  We used to use unmap_buffer() for such invalidation, but that was
 * wrong.  We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use the folio lock instead of bd_mapping->private_lock
			 * to pin buffers here since we can afford to sleep and
			 * it scales better than a global spinlock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
	return ilog2(blocksize);
}

static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	BUG_ON(!folio_test_locked(folio));

	if (!folio_buffers(folio))
		folio_create_empty_buffers(folio,
					   1 << READ_ONCE(inode->i_blkbits),
					   b_state);
	return folio_buffers(folio);
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This can only happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	unsigned int blocksize, bbits;
	int nr_underway = 0;
	blk_opf_t write_flags = wbc_to_write_flags(wbc);

	head = folio_create_buffers(folio, inode,
				    (1 << BH_Dirty) | (1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the folio stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	bh = head;
	blocksize = bh->b_size;
	bbits = block_size_bits(blocksize);

	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
	last_block = (i_size_read(inode) - 1) >> bbits;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this folio can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				clean_bdev_bh_alias(bh);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the folio.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			folio_redirty_for_writepage(wbc, folio);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh, handler);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The folio and its buffers are protected by the writeback flag,
	 * so we can drop the bh refcounts early.
	 */
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The folio was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * write_dirty_buffer/submit_bh.  A rare case.
		 */
		folio_end_writeback(folio);

		/*
		 * The folio and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The folio is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh, handler);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty folio.
1951 */ 1952 clear_buffer_dirty(bh); 1953 } 1954 } while ((bh = bh->b_this_page) != head); 1955 folio_set_error(folio); 1956 BUG_ON(folio_test_writeback(folio)); 1957 mapping_set_error(folio->mapping, err); 1958 folio_start_writeback(folio); 1959 do { 1960 struct buffer_head *next = bh->b_this_page; 1961 if (buffer_async_write(bh)) { 1962 clear_buffer_dirty(bh); 1963 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); 1964 nr_underway++; 1965 } 1966 bh = next; 1967 } while (bh != head); 1968 folio_unlock(folio); 1969 goto done; 1970 } 1971 EXPORT_SYMBOL(__block_write_full_folio); 1972 1973 /* 1974 * If a folio has any new buffers, zero them out here, and mark them uptodate 1975 * and dirty so they'll be written out (in order to prevent uninitialised 1976 * block data from leaking). And clear the new bit. 1977 */ 1978 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) 1979 { 1980 size_t block_start, block_end; 1981 struct buffer_head *head, *bh; 1982 1983 BUG_ON(!folio_test_locked(folio)); 1984 head = folio_buffers(folio); 1985 if (!head) 1986 return; 1987 1988 bh = head; 1989 block_start = 0; 1990 do { 1991 block_end = block_start + bh->b_size; 1992 1993 if (buffer_new(bh)) { 1994 if (block_end > from && block_start < to) { 1995 if (!folio_test_uptodate(folio)) { 1996 size_t start, xend; 1997 1998 start = max(from, block_start); 1999 xend = min(to, block_end); 2000 2001 folio_zero_segment(folio, start, xend); 2002 set_buffer_uptodate(bh); 2003 } 2004 2005 clear_buffer_new(bh); 2006 mark_buffer_dirty(bh); 2007 } 2008 } 2009 2010 block_start = block_end; 2011 bh = bh->b_this_page; 2012 } while (bh != head); 2013 } 2014 EXPORT_SYMBOL(folio_zero_new_buffers); 2015 2016 static int 2017 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 2018 const struct iomap *iomap) 2019 { 2020 loff_t offset = block << inode->i_blkbits; 2021 2022 bh->b_bdev = iomap->bdev; 2023 2024 /* 2025 * Block points to offset in file we need to map, iomap contains 2026 * the offset at which the map starts. If the map ends before the 2027 * current block, then do not map the buffer and let the caller 2028 * handle it. 2029 */ 2030 if (offset >= iomap->offset + iomap->length) 2031 return -EIO; 2032 2033 switch (iomap->type) { 2034 case IOMAP_HOLE: 2035 /* 2036 * If the buffer is not up to date or beyond the current EOF, 2037 * we need to mark it as new to ensure sub-block zeroing is 2038 * executed if necessary. 2039 */ 2040 if (!buffer_uptodate(bh) || 2041 (offset >= i_size_read(inode))) 2042 set_buffer_new(bh); 2043 return 0; 2044 case IOMAP_DELALLOC: 2045 if (!buffer_uptodate(bh) || 2046 (offset >= i_size_read(inode))) 2047 set_buffer_new(bh); 2048 set_buffer_uptodate(bh); 2049 set_buffer_mapped(bh); 2050 set_buffer_delay(bh); 2051 return 0; 2052 case IOMAP_UNWRITTEN: 2053 /* 2054 * For unwritten regions, we always need to ensure that regions 2055 * in the block we are not writing to are zeroed. Mark the 2056 * buffer as new to ensure this. 2057 */ 2058 set_buffer_new(bh); 2059 set_buffer_unwritten(bh); 2060 fallthrough; 2061 case IOMAP_MAPPED: 2062 if ((iomap->flags & IOMAP_F_NEW) || 2063 offset >= i_size_read(inode)) { 2064 /* 2065 * This can happen if truncating the block device races 2066 * with the check in the caller as i_size updates on 2067 * block devices aren't synchronized by i_rwsem for 2068 * block devices. 
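 * For regular files i_size is stable here because the caller holds
 * i_rwsem, so this condition simply means a freshly allocated or
 * appending block and the buffer is marked new; only the block-device
 * case below is treated as an error.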
2069 */ 2070 if (S_ISBLK(inode->i_mode)) 2071 return -EIO; 2072 set_buffer_new(bh); 2073 } 2074 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 2075 inode->i_blkbits; 2076 set_buffer_mapped(bh); 2077 return 0; 2078 default: 2079 WARN_ON_ONCE(1); 2080 return -EIO; 2081 } 2082 } 2083 2084 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 2085 get_block_t *get_block, const struct iomap *iomap) 2086 { 2087 unsigned from = pos & (PAGE_SIZE - 1); 2088 unsigned to = from + len; 2089 struct inode *inode = folio->mapping->host; 2090 unsigned block_start, block_end; 2091 sector_t block; 2092 int err = 0; 2093 unsigned blocksize, bbits; 2094 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 2095 2096 BUG_ON(!folio_test_locked(folio)); 2097 BUG_ON(from > PAGE_SIZE); 2098 BUG_ON(to > PAGE_SIZE); 2099 BUG_ON(from > to); 2100 2101 head = folio_create_buffers(folio, inode, 0); 2102 blocksize = head->b_size; 2103 bbits = block_size_bits(blocksize); 2104 2105 block = (sector_t)folio->index << (PAGE_SHIFT - bbits); 2106 2107 for(bh = head, block_start = 0; bh != head || !block_start; 2108 block++, block_start=block_end, bh = bh->b_this_page) { 2109 block_end = block_start + blocksize; 2110 if (block_end <= from || block_start >= to) { 2111 if (folio_test_uptodate(folio)) { 2112 if (!buffer_uptodate(bh)) 2113 set_buffer_uptodate(bh); 2114 } 2115 continue; 2116 } 2117 if (buffer_new(bh)) 2118 clear_buffer_new(bh); 2119 if (!buffer_mapped(bh)) { 2120 WARN_ON(bh->b_size != blocksize); 2121 if (get_block) 2122 err = get_block(inode, block, bh, 1); 2123 else 2124 err = iomap_to_bh(inode, block, bh, iomap); 2125 if (err) 2126 break; 2127 2128 if (buffer_new(bh)) { 2129 clean_bdev_bh_alias(bh); 2130 if (folio_test_uptodate(folio)) { 2131 clear_buffer_new(bh); 2132 set_buffer_uptodate(bh); 2133 mark_buffer_dirty(bh); 2134 continue; 2135 } 2136 if (block_end > to || block_start < from) 2137 folio_zero_segments(folio, 2138 to, block_end, 2139 block_start, from); 2140 continue; 2141 } 2142 } 2143 if (folio_test_uptodate(folio)) { 2144 if (!buffer_uptodate(bh)) 2145 set_buffer_uptodate(bh); 2146 continue; 2147 } 2148 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 2149 !buffer_unwritten(bh) && 2150 (block_start < from || block_end > to)) { 2151 bh_read_nowait(bh, 0); 2152 *wait_bh++=bh; 2153 } 2154 } 2155 /* 2156 * If we issued read requests - let them complete. 
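 *
 * Example (illustrative): with 512-byte blocks, a 100-byte write at
 * offset 1000 within the page only partially covers blocks 1 (512-1023)
 * and 2 (1024-1535), so both were queued for reading via bh_read_nowait()
 * above, unless they were already uptodate, delayed or unwritten; wait
 * for those reads here before the caller copies in the new data.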
2157 */ 2158 while(wait_bh > wait) { 2159 wait_on_buffer(*--wait_bh); 2160 if (!buffer_uptodate(*wait_bh)) 2161 err = -EIO; 2162 } 2163 if (unlikely(err)) 2164 folio_zero_new_buffers(folio, from, to); 2165 return err; 2166 } 2167 2168 int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2169 get_block_t *get_block) 2170 { 2171 return __block_write_begin_int(page_folio(page), pos, len, get_block, 2172 NULL); 2173 } 2174 EXPORT_SYMBOL(__block_write_begin); 2175 2176 static void __block_commit_write(struct folio *folio, size_t from, size_t to) 2177 { 2178 size_t block_start, block_end; 2179 bool partial = false; 2180 unsigned blocksize; 2181 struct buffer_head *bh, *head; 2182 2183 bh = head = folio_buffers(folio); 2184 blocksize = bh->b_size; 2185 2186 block_start = 0; 2187 do { 2188 block_end = block_start + blocksize; 2189 if (block_end <= from || block_start >= to) { 2190 if (!buffer_uptodate(bh)) 2191 partial = true; 2192 } else { 2193 set_buffer_uptodate(bh); 2194 mark_buffer_dirty(bh); 2195 } 2196 if (buffer_new(bh)) 2197 clear_buffer_new(bh); 2198 2199 block_start = block_end; 2200 bh = bh->b_this_page; 2201 } while (bh != head); 2202 2203 /* 2204 * If this is a partial write which happened to make all buffers 2205 * uptodate then we can optimize away a bogus read_folio() for 2206 * the next read(). Here we 'discover' whether the folio went 2207 * uptodate as a result of this (potentially partial) write. 2208 */ 2209 if (!partial) 2210 folio_mark_uptodate(folio); 2211 } 2212 2213 /* 2214 * block_write_begin takes care of the basic task of block allocation and 2215 * bringing partial write blocks uptodate first. 2216 * 2217 * The filesystem needs to handle block truncation upon failure. 2218 */ 2219 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2220 struct page **pagep, get_block_t *get_block) 2221 { 2222 pgoff_t index = pos >> PAGE_SHIFT; 2223 struct page *page; 2224 int status; 2225 2226 page = grab_cache_page_write_begin(mapping, index); 2227 if (!page) 2228 return -ENOMEM; 2229 2230 status = __block_write_begin(page, pos, len, get_block); 2231 if (unlikely(status)) { 2232 unlock_page(page); 2233 put_page(page); 2234 page = NULL; 2235 } 2236 2237 *pagep = page; 2238 return status; 2239 } 2240 EXPORT_SYMBOL(block_write_begin); 2241 2242 int block_write_end(struct file *file, struct address_space *mapping, 2243 loff_t pos, unsigned len, unsigned copied, 2244 struct page *page, void *fsdata) 2245 { 2246 struct folio *folio = page_folio(page); 2247 size_t start = pos - folio_pos(folio); 2248 2249 if (unlikely(copied < len)) { 2250 /* 2251 * The buffers that were written will now be uptodate, so 2252 * we don't have to worry about a read_folio reading them 2253 * and overwriting a partial write. However if we have 2254 * encountered a short write and only partially written 2255 * into a buffer, it will not be marked uptodate, so a 2256 * read_folio might come in and destroy our partial write. 2257 * 2258 * Do the simplest thing, and just treat any short write to a 2259 * non uptodate folio as a zero-length write, and force the 2260 * caller to redo the whole thing. 
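 * (generic_perform_write() copes with a zero return from ->write_end()
 * by faulting the source pages back in and retrying the copy, so no user
 * data is lost.)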
2261 */ 2262 if (!folio_test_uptodate(folio)) 2263 copied = 0; 2264 2265 folio_zero_new_buffers(folio, start+copied, start+len); 2266 } 2267 flush_dcache_folio(folio); 2268 2269 /* This could be a short (even 0-length) commit */ 2270 __block_commit_write(folio, start, start + copied); 2271 2272 return copied; 2273 } 2274 EXPORT_SYMBOL(block_write_end); 2275 2276 int generic_write_end(struct file *file, struct address_space *mapping, 2277 loff_t pos, unsigned len, unsigned copied, 2278 struct page *page, void *fsdata) 2279 { 2280 struct inode *inode = mapping->host; 2281 loff_t old_size = inode->i_size; 2282 bool i_size_changed = false; 2283 2284 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2285 2286 /* 2287 * No need to use i_size_read() here, the i_size cannot change under us 2288 * because we hold i_rwsem. 2289 * 2290 * But it's important to update i_size while still holding page lock: 2291 * page writeout could otherwise come in and zero beyond i_size. 2292 */ 2293 if (pos + copied > inode->i_size) { 2294 i_size_write(inode, pos + copied); 2295 i_size_changed = true; 2296 } 2297 2298 unlock_page(page); 2299 put_page(page); 2300 2301 if (old_size < pos) 2302 pagecache_isize_extended(inode, old_size, pos); 2303 /* 2304 * Don't mark the inode dirty under page lock. First, it unnecessarily 2305 * makes the holding time of page lock longer. Second, it forces lock 2306 * ordering of page lock and transaction start for journaling 2307 * filesystems. 2308 */ 2309 if (i_size_changed) 2310 mark_inode_dirty(inode); 2311 return copied; 2312 } 2313 EXPORT_SYMBOL(generic_write_end); 2314 2315 /* 2316 * block_is_partially_uptodate checks whether buffers within a folio are 2317 * uptodate or not. 2318 * 2319 * Returns true if all buffers which correspond to the specified part 2320 * of the folio are uptodate. 2321 */ 2322 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 2323 { 2324 unsigned block_start, block_end, blocksize; 2325 unsigned to; 2326 struct buffer_head *bh, *head; 2327 bool ret = true; 2328 2329 head = folio_buffers(folio); 2330 if (!head) 2331 return false; 2332 blocksize = head->b_size; 2333 to = min_t(unsigned, folio_size(folio) - from, count); 2334 to = from + to; 2335 if (from < blocksize && to > folio_size(folio) - blocksize) 2336 return false; 2337 2338 bh = head; 2339 block_start = 0; 2340 do { 2341 block_end = block_start + blocksize; 2342 if (block_end > from && block_start < to) { 2343 if (!buffer_uptodate(bh)) { 2344 ret = false; 2345 break; 2346 } 2347 if (block_end >= to) 2348 break; 2349 } 2350 block_start = block_end; 2351 bh = bh->b_this_page; 2352 } while (bh != head); 2353 2354 return ret; 2355 } 2356 EXPORT_SYMBOL(block_is_partially_uptodate); 2357 2358 /* 2359 * Generic "read_folio" function for block devices that have the normal 2360 * get_block functionality. This is most of the block device filesystems. 2361 * Reads the folio asynchronously --- the unlock_buffer() and 2362 * set/clear_buffer_uptodate() functions propagate buffer state into the 2363 * folio once IO has completed. 2364 */ 2365 int block_read_full_folio(struct folio *folio, get_block_t *get_block) 2366 { 2367 struct inode *inode = folio->mapping->host; 2368 sector_t iblock, lblock; 2369 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2370 unsigned int blocksize, bbits; 2371 int nr, i; 2372 int fully_mapped = 1; 2373 bool page_error = false; 2374 loff_t limit = i_size_read(inode); 2375 2376 /* This is needed for ext4. 
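Its fs-verity implementation stores the Merkle tree
	 * past i_size and reads those blocks through the page cache, so they must
	 * not be treated as beyond EOF and zero-filled here; raise the limit to
	 * s_maxbytes for verity inodes instead.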
*/ 2377 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 2378 limit = inode->i_sb->s_maxbytes; 2379 2380 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 2381 2382 head = folio_create_buffers(folio, inode, 0); 2383 blocksize = head->b_size; 2384 bbits = block_size_bits(blocksize); 2385 2386 iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits); 2387 lblock = (limit+blocksize-1) >> bbits; 2388 bh = head; 2389 nr = 0; 2390 i = 0; 2391 2392 do { 2393 if (buffer_uptodate(bh)) 2394 continue; 2395 2396 if (!buffer_mapped(bh)) { 2397 int err = 0; 2398 2399 fully_mapped = 0; 2400 if (iblock < lblock) { 2401 WARN_ON(bh->b_size != blocksize); 2402 err = get_block(inode, iblock, bh, 0); 2403 if (err) { 2404 folio_set_error(folio); 2405 page_error = true; 2406 } 2407 } 2408 if (!buffer_mapped(bh)) { 2409 folio_zero_range(folio, i * blocksize, 2410 blocksize); 2411 if (!err) 2412 set_buffer_uptodate(bh); 2413 continue; 2414 } 2415 /* 2416 * get_block() might have updated the buffer 2417 * synchronously 2418 */ 2419 if (buffer_uptodate(bh)) 2420 continue; 2421 } 2422 arr[nr++] = bh; 2423 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2424 2425 if (fully_mapped) 2426 folio_set_mappedtodisk(folio); 2427 2428 if (!nr) { 2429 /* 2430 * All buffers are uptodate or get_block() returned an 2431 * error when trying to map them - we can finish the read. 2432 */ 2433 folio_end_read(folio, !page_error); 2434 return 0; 2435 } 2436 2437 /* Stage two: lock the buffers */ 2438 for (i = 0; i < nr; i++) { 2439 bh = arr[i]; 2440 lock_buffer(bh); 2441 mark_buffer_async_read(bh); 2442 } 2443 2444 /* 2445 * Stage 3: start the IO. Check for uptodateness 2446 * inside the buffer lock in case another process reading 2447 * the underlying blockdev brought it uptodate (the sct fix). 2448 */ 2449 for (i = 0; i < nr; i++) { 2450 bh = arr[i]; 2451 if (buffer_uptodate(bh)) 2452 end_buffer_async_read(bh, 1); 2453 else 2454 submit_bh(REQ_OP_READ, bh); 2455 } 2456 return 0; 2457 } 2458 EXPORT_SYMBOL(block_read_full_folio); 2459 2460 /* utility function for filesystems that need to do work on expanding 2461 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2462 * deal with the hole. 
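 *
 * Illustrative sketch (not part of this file): a filesystem's ->setattr()
 * could use this for an expanding truncate roughly as follows, with
 * example_setattr() being a hypothetical caller:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		error = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (error)
 *			return error;
 *	}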
2463 */ 2464 int generic_cont_expand_simple(struct inode *inode, loff_t size) 2465 { 2466 struct address_space *mapping = inode->i_mapping; 2467 const struct address_space_operations *aops = mapping->a_ops; 2468 struct page *page; 2469 void *fsdata = NULL; 2470 int err; 2471 2472 err = inode_newsize_ok(inode, size); 2473 if (err) 2474 goto out; 2475 2476 err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata); 2477 if (err) 2478 goto out; 2479 2480 err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata); 2481 BUG_ON(err > 0); 2482 2483 out: 2484 return err; 2485 } 2486 EXPORT_SYMBOL(generic_cont_expand_simple); 2487 2488 static int cont_expand_zero(struct file *file, struct address_space *mapping, 2489 loff_t pos, loff_t *bytes) 2490 { 2491 struct inode *inode = mapping->host; 2492 const struct address_space_operations *aops = mapping->a_ops; 2493 unsigned int blocksize = i_blocksize(inode); 2494 struct page *page; 2495 void *fsdata = NULL; 2496 pgoff_t index, curidx; 2497 loff_t curpos; 2498 unsigned zerofrom, offset, len; 2499 int err = 0; 2500 2501 index = pos >> PAGE_SHIFT; 2502 offset = pos & ~PAGE_MASK; 2503 2504 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 2505 zerofrom = curpos & ~PAGE_MASK; 2506 if (zerofrom & (blocksize-1)) { 2507 *bytes |= (blocksize-1); 2508 (*bytes)++; 2509 } 2510 len = PAGE_SIZE - zerofrom; 2511 2512 err = aops->write_begin(file, mapping, curpos, len, 2513 &page, &fsdata); 2514 if (err) 2515 goto out; 2516 zero_user(page, zerofrom, len); 2517 err = aops->write_end(file, mapping, curpos, len, len, 2518 page, fsdata); 2519 if (err < 0) 2520 goto out; 2521 BUG_ON(err != len); 2522 err = 0; 2523 2524 balance_dirty_pages_ratelimited(mapping); 2525 2526 if (fatal_signal_pending(current)) { 2527 err = -EINTR; 2528 goto out; 2529 } 2530 } 2531 2532 /* page covers the boundary, find the boundary offset */ 2533 if (index == curidx) { 2534 zerofrom = curpos & ~PAGE_MASK; 2535 /* if we will expand the thing last block will be filled */ 2536 if (offset <= zerofrom) { 2537 goto out; 2538 } 2539 if (zerofrom & (blocksize-1)) { 2540 *bytes |= (blocksize-1); 2541 (*bytes)++; 2542 } 2543 len = offset - zerofrom; 2544 2545 err = aops->write_begin(file, mapping, curpos, len, 2546 &page, &fsdata); 2547 if (err) 2548 goto out; 2549 zero_user(page, zerofrom, len); 2550 err = aops->write_end(file, mapping, curpos, len, len, 2551 page, fsdata); 2552 if (err < 0) 2553 goto out; 2554 BUG_ON(err != len); 2555 err = 0; 2556 } 2557 out: 2558 return err; 2559 } 2560 2561 /* 2562 * For moronic filesystems that do not allow holes in file. 2563 * We may have to extend the file. 
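 *
 * cont_expand_zero() above fills the gap between the current end of data
 * (*bytes) and the write position with real zeroed blocks, page by page,
 * where a hole-supporting filesystem would simply leave the range
 * unmapped. Example (illustrative): with *bytes == 4096 and a write
 * starting at pos == 20480, the four pages in between are zero-filled
 * before the write itself is begun.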
2564 */ 2565 int cont_write_begin(struct file *file, struct address_space *mapping, 2566 loff_t pos, unsigned len, 2567 struct page **pagep, void **fsdata, 2568 get_block_t *get_block, loff_t *bytes) 2569 { 2570 struct inode *inode = mapping->host; 2571 unsigned int blocksize = i_blocksize(inode); 2572 unsigned int zerofrom; 2573 int err; 2574 2575 err = cont_expand_zero(file, mapping, pos, bytes); 2576 if (err) 2577 return err; 2578 2579 zerofrom = *bytes & ~PAGE_MASK; 2580 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2581 *bytes |= (blocksize-1); 2582 (*bytes)++; 2583 } 2584 2585 return block_write_begin(mapping, pos, len, pagep, get_block); 2586 } 2587 EXPORT_SYMBOL(cont_write_begin); 2588 2589 void block_commit_write(struct page *page, unsigned from, unsigned to) 2590 { 2591 struct folio *folio = page_folio(page); 2592 __block_commit_write(folio, from, to); 2593 } 2594 EXPORT_SYMBOL(block_commit_write); 2595 2596 /* 2597 * block_page_mkwrite() is not allowed to change the file size as it gets 2598 * called from a page fault handler when a page is first dirtied. Hence we must 2599 * be careful to check for EOF conditions here. We set the page up correctly 2600 * for a written page which means we get ENOSPC checking when writing into 2601 * holes and correct delalloc and unwritten extent mapping on filesystems that 2602 * support these features. 2603 * 2604 * We are not allowed to take the i_mutex here so we have to play games to 2605 * protect against truncate races as the page could now be beyond EOF. Because 2606 * truncate writes the inode size before removing pages, once we have the 2607 * page lock we can determine safely if the page is beyond EOF. If it is not 2608 * beyond EOF, then the page is guaranteed safe against truncation until we 2609 * unlock the page. 2610 * 2611 * Direct callers of this function should protect against filesystem freezing 2612 * using sb_start_pagefault() - sb_end_pagefault() functions. 2613 */ 2614 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2615 get_block_t get_block) 2616 { 2617 struct folio *folio = page_folio(vmf->page); 2618 struct inode *inode = file_inode(vma->vm_file); 2619 unsigned long end; 2620 loff_t size; 2621 int ret; 2622 2623 folio_lock(folio); 2624 size = i_size_read(inode); 2625 if ((folio->mapping != inode->i_mapping) || 2626 (folio_pos(folio) >= size)) { 2627 /* We overload EFAULT to mean page got truncated */ 2628 ret = -EFAULT; 2629 goto out_unlock; 2630 } 2631 2632 end = folio_size(folio); 2633 /* folio is wholly or partially inside EOF */ 2634 if (folio_pos(folio) + end > size) 2635 end = size - folio_pos(folio); 2636 2637 ret = __block_write_begin_int(folio, 0, end, get_block, NULL); 2638 if (unlikely(ret)) 2639 goto out_unlock; 2640 2641 __block_commit_write(folio, 0, end); 2642 2643 folio_mark_dirty(folio); 2644 folio_wait_stable(folio); 2645 return 0; 2646 out_unlock: 2647 folio_unlock(folio); 2648 return ret; 2649 } 2650 EXPORT_SYMBOL(block_page_mkwrite); 2651 2652 int block_truncate_page(struct address_space *mapping, 2653 loff_t from, get_block_t *get_block) 2654 { 2655 pgoff_t index = from >> PAGE_SHIFT; 2656 unsigned blocksize; 2657 sector_t iblock; 2658 size_t offset, length, pos; 2659 struct inode *inode = mapping->host; 2660 struct folio *folio; 2661 struct buffer_head *bh; 2662 int err = 0; 2663 2664 blocksize = i_blocksize(inode); 2665 length = from & (blocksize - 1); 2666 2667 /* Block boundary? 
Nothing to do */ 2668 if (!length) 2669 return 0; 2670 2671 length = blocksize - length; 2672 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 2673 2674 folio = filemap_grab_folio(mapping, index); 2675 if (IS_ERR(folio)) 2676 return PTR_ERR(folio); 2677 2678 bh = folio_buffers(folio); 2679 if (!bh) { 2680 folio_create_empty_buffers(folio, blocksize, 0); 2681 bh = folio_buffers(folio); 2682 } 2683 2684 /* Find the buffer that contains "offset" */ 2685 offset = offset_in_folio(folio, from); 2686 pos = blocksize; 2687 while (offset >= pos) { 2688 bh = bh->b_this_page; 2689 iblock++; 2690 pos += blocksize; 2691 } 2692 2693 if (!buffer_mapped(bh)) { 2694 WARN_ON(bh->b_size != blocksize); 2695 err = get_block(inode, iblock, bh, 0); 2696 if (err) 2697 goto unlock; 2698 /* unmapped? It's a hole - nothing to do */ 2699 if (!buffer_mapped(bh)) 2700 goto unlock; 2701 } 2702 2703 /* Ok, it's mapped. Make sure it's up-to-date */ 2704 if (folio_test_uptodate(folio)) 2705 set_buffer_uptodate(bh); 2706 2707 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2708 err = bh_read(bh, 0); 2709 /* Uhhuh. Read error. Complain and punt. */ 2710 if (err < 0) 2711 goto unlock; 2712 } 2713 2714 folio_zero_range(folio, offset, length); 2715 mark_buffer_dirty(bh); 2716 2717 unlock: 2718 folio_unlock(folio); 2719 folio_put(folio); 2720 2721 return err; 2722 } 2723 EXPORT_SYMBOL(block_truncate_page); 2724 2725 /* 2726 * The generic ->writepage function for buffer-backed address_spaces 2727 */ 2728 int block_write_full_page(struct page *page, get_block_t *get_block, 2729 struct writeback_control *wbc) 2730 { 2731 struct folio *folio = page_folio(page); 2732 struct inode * const inode = folio->mapping->host; 2733 loff_t i_size = i_size_read(inode); 2734 2735 /* Is the folio fully inside i_size? */ 2736 if (folio_pos(folio) + folio_size(folio) <= i_size) 2737 return __block_write_full_folio(inode, folio, get_block, wbc, 2738 end_buffer_async_write); 2739 2740 /* Is the folio fully outside i_size? (truncate in progress) */ 2741 if (folio_pos(folio) >= i_size) { 2742 folio_unlock(folio); 2743 return 0; /* don't care */ 2744 } 2745 2746 /* 2747 * The folio straddles i_size. It must be zeroed out on each and every 2748 * writepage invocation because it may be mmapped. "A file is mapped 2749 * in multiples of the page size. For a file that is not a multiple of 2750 * the page size, the remaining memory is zeroed when mapped, and 2751 * writes to that region are not written out to the file." 
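 *
 * Example (illustrative): with 4096-byte pages and i_size == 10000, the
 * folio at index 2 covers file bytes 8192-12287; the call below zeroes
 * folio offsets 1808-4095 before the folio is written back.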
2752 */ 2753 folio_zero_segment(folio, offset_in_folio(folio, i_size), 2754 folio_size(folio)); 2755 return __block_write_full_folio(inode, folio, get_block, wbc, 2756 end_buffer_async_write); 2757 } 2758 EXPORT_SYMBOL(block_write_full_page); 2759 2760 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2761 get_block_t *get_block) 2762 { 2763 struct inode *inode = mapping->host; 2764 struct buffer_head tmp = { 2765 .b_size = i_blocksize(inode), 2766 }; 2767 2768 get_block(inode, block, &tmp, 0); 2769 return tmp.b_blocknr; 2770 } 2771 EXPORT_SYMBOL(generic_block_bmap); 2772 2773 static void end_bio_bh_io_sync(struct bio *bio) 2774 { 2775 struct buffer_head *bh = bio->bi_private; 2776 2777 if (unlikely(bio_flagged(bio, BIO_QUIET))) 2778 set_bit(BH_Quiet, &bh->b_state); 2779 2780 bh->b_end_io(bh, !bio->bi_status); 2781 bio_put(bio); 2782 } 2783 2784 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 2785 struct writeback_control *wbc) 2786 { 2787 const enum req_op op = opf & REQ_OP_MASK; 2788 struct bio *bio; 2789 2790 BUG_ON(!buffer_locked(bh)); 2791 BUG_ON(!buffer_mapped(bh)); 2792 BUG_ON(!bh->b_end_io); 2793 BUG_ON(buffer_delay(bh)); 2794 BUG_ON(buffer_unwritten(bh)); 2795 2796 /* 2797 * Only clear out a write error when rewriting 2798 */ 2799 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 2800 clear_buffer_write_io_error(bh); 2801 2802 if (buffer_meta(bh)) 2803 opf |= REQ_META; 2804 if (buffer_prio(bh)) 2805 opf |= REQ_PRIO; 2806 2807 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 2808 2809 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 2810 2811 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 2812 2813 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 2814 2815 bio->bi_end_io = end_bio_bh_io_sync; 2816 bio->bi_private = bh; 2817 2818 /* Take care of bh's that straddle the end of the device */ 2819 guard_bio_eod(bio); 2820 2821 if (wbc) { 2822 wbc_init_bio(wbc, bio); 2823 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); 2824 } 2825 2826 submit_bio(bio); 2827 } 2828 2829 void submit_bh(blk_opf_t opf, struct buffer_head *bh) 2830 { 2831 submit_bh_wbc(opf, bh, NULL); 2832 } 2833 EXPORT_SYMBOL(submit_bh); 2834 2835 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2836 { 2837 lock_buffer(bh); 2838 if (!test_clear_buffer_dirty(bh)) { 2839 unlock_buffer(bh); 2840 return; 2841 } 2842 bh->b_end_io = end_buffer_write_sync; 2843 get_bh(bh); 2844 submit_bh(REQ_OP_WRITE | op_flags, bh); 2845 } 2846 EXPORT_SYMBOL(write_dirty_buffer); 2847 2848 /* 2849 * For a data-integrity writeout, we need to wait upon any in-progress I/O 2850 * and then start new I/O and then wait upon it. The caller must have a ref on 2851 * the buffer_head. 2852 */ 2853 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2854 { 2855 WARN_ON(atomic_read(&bh->b_count) < 1); 2856 lock_buffer(bh); 2857 if (test_clear_buffer_dirty(bh)) { 2858 /* 2859 * The bh should be mapped, but it might not be if the 2860 * device was hot-removed. Not much we can do but fail the I/O. 
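 * (The caller's own reference on the buffer is not consumed here; it must
 * still be released with brelse() as usual.)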
2861 */ 2862 if (!buffer_mapped(bh)) { 2863 unlock_buffer(bh); 2864 return -EIO; 2865 } 2866 2867 get_bh(bh); 2868 bh->b_end_io = end_buffer_write_sync; 2869 submit_bh(REQ_OP_WRITE | op_flags, bh); 2870 wait_on_buffer(bh); 2871 if (!buffer_uptodate(bh)) 2872 return -EIO; 2873 } else { 2874 unlock_buffer(bh); 2875 } 2876 return 0; 2877 } 2878 EXPORT_SYMBOL(__sync_dirty_buffer); 2879 2880 int sync_dirty_buffer(struct buffer_head *bh) 2881 { 2882 return __sync_dirty_buffer(bh, REQ_SYNC); 2883 } 2884 EXPORT_SYMBOL(sync_dirty_buffer); 2885 2886 /* 2887 * try_to_free_buffers() checks if all the buffers on this particular folio 2888 * are unused, and releases them if so. 2889 * 2890 * Exclusion against try_to_free_buffers may be obtained by either 2891 * locking the folio or by holding its mapping's private_lock. 2892 * 2893 * If the folio is dirty but all the buffers are clean then we need to 2894 * be sure to mark the folio clean as well. This is because the folio 2895 * may be against a block device, and a later reattachment of buffers 2896 * to a dirty folio will set *all* buffers dirty. Which would corrupt 2897 * filesystem data on the same device. 2898 * 2899 * The same applies to regular filesystem folios: if all the buffers are 2900 * clean then we set the folio clean and proceed. To do that, we require 2901 * total exclusion from block_dirty_folio(). That is obtained with 2902 * private_lock. 2903 * 2904 * try_to_free_buffers() is non-blocking. 2905 */ 2906 static inline int buffer_busy(struct buffer_head *bh) 2907 { 2908 return atomic_read(&bh->b_count) | 2909 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2910 } 2911 2912 static bool 2913 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 2914 { 2915 struct buffer_head *head = folio_buffers(folio); 2916 struct buffer_head *bh; 2917 2918 bh = head; 2919 do { 2920 if (buffer_busy(bh)) 2921 goto failed; 2922 bh = bh->b_this_page; 2923 } while (bh != head); 2924 2925 do { 2926 struct buffer_head *next = bh->b_this_page; 2927 2928 if (bh->b_assoc_map) 2929 __remove_assoc_queue(bh); 2930 bh = next; 2931 } while (bh != head); 2932 *buffers_to_free = head; 2933 folio_detach_private(folio); 2934 return true; 2935 failed: 2936 return false; 2937 } 2938 2939 bool try_to_free_buffers(struct folio *folio) 2940 { 2941 struct address_space * const mapping = folio->mapping; 2942 struct buffer_head *buffers_to_free = NULL; 2943 bool ret = 0; 2944 2945 BUG_ON(!folio_test_locked(folio)); 2946 if (folio_test_writeback(folio)) 2947 return false; 2948 2949 if (mapping == NULL) { /* can this still happen? */ 2950 ret = drop_buffers(folio, &buffers_to_free); 2951 goto out; 2952 } 2953 2954 spin_lock(&mapping->private_lock); 2955 ret = drop_buffers(folio, &buffers_to_free); 2956 2957 /* 2958 * If the filesystem writes its buffers by hand (eg ext3) 2959 * then we can have clean buffers against a dirty folio. We 2960 * clean the folio here; otherwise the VM will never notice 2961 * that the filesystem did any IO at all. 2962 * 2963 * Also, during truncate, discard_buffer will have marked all 2964 * the folio's buffers clean. We discover that here and clean 2965 * the folio also. 2966 * 2967 * private_lock must be held over this entire operation in order 2968 * to synchronise against block_dirty_folio and prevent the 2969 * dirty bit from being lost. 
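 * (Otherwise a concurrent block_dirty_folio() could mark the folio dirty
 * again between drop_buffers() succeeding and folio_cancel_dirty() below,
 * and that dirtiness would be thrown away.)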
2970 */ 2971 if (ret) 2972 folio_cancel_dirty(folio); 2973 spin_unlock(&mapping->private_lock); 2974 out: 2975 if (buffers_to_free) { 2976 struct buffer_head *bh = buffers_to_free; 2977 2978 do { 2979 struct buffer_head *next = bh->b_this_page; 2980 free_buffer_head(bh); 2981 bh = next; 2982 } while (bh != buffers_to_free); 2983 } 2984 return ret; 2985 } 2986 EXPORT_SYMBOL(try_to_free_buffers); 2987 2988 /* 2989 * Buffer-head allocation 2990 */ 2991 static struct kmem_cache *bh_cachep __read_mostly; 2992 2993 /* 2994 * Once the number of bh's in the machine exceeds this level, we start 2995 * stripping them in writeback. 2996 */ 2997 static unsigned long max_buffer_heads; 2998 2999 int buffer_heads_over_limit; 3000 3001 struct bh_accounting { 3002 int nr; /* Number of live bh's */ 3003 int ratelimit; /* Limit cacheline bouncing */ 3004 }; 3005 3006 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3007 3008 static void recalc_bh_state(void) 3009 { 3010 int i; 3011 int tot = 0; 3012 3013 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3014 return; 3015 __this_cpu_write(bh_accounting.ratelimit, 0); 3016 for_each_online_cpu(i) 3017 tot += per_cpu(bh_accounting, i).nr; 3018 buffer_heads_over_limit = (tot > max_buffer_heads); 3019 } 3020 3021 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3022 { 3023 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3024 if (ret) { 3025 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3026 spin_lock_init(&ret->b_uptodate_lock); 3027 preempt_disable(); 3028 __this_cpu_inc(bh_accounting.nr); 3029 recalc_bh_state(); 3030 preempt_enable(); 3031 } 3032 return ret; 3033 } 3034 EXPORT_SYMBOL(alloc_buffer_head); 3035 3036 void free_buffer_head(struct buffer_head *bh) 3037 { 3038 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3039 kmem_cache_free(bh_cachep, bh); 3040 preempt_disable(); 3041 __this_cpu_dec(bh_accounting.nr); 3042 recalc_bh_state(); 3043 preempt_enable(); 3044 } 3045 EXPORT_SYMBOL(free_buffer_head); 3046 3047 static int buffer_exit_cpu_dead(unsigned int cpu) 3048 { 3049 int i; 3050 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3051 3052 for (i = 0; i < BH_LRU_SIZE; i++) { 3053 brelse(b->bhs[i]); 3054 b->bhs[i] = NULL; 3055 } 3056 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3057 per_cpu(bh_accounting, cpu).nr = 0; 3058 return 0; 3059 } 3060 3061 /** 3062 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3063 * @bh: struct buffer_head 3064 * 3065 * Return true if the buffer is up-to-date and false, 3066 * with the buffer locked, if not. 3067 */ 3068 int bh_uptodate_or_lock(struct buffer_head *bh) 3069 { 3070 if (!buffer_uptodate(bh)) { 3071 lock_buffer(bh); 3072 if (!buffer_uptodate(bh)) 3073 return 0; 3074 unlock_buffer(bh); 3075 } 3076 return 1; 3077 } 3078 EXPORT_SYMBOL(bh_uptodate_or_lock); 3079 3080 /** 3081 * __bh_read - Submit read for a locked buffer 3082 * @bh: struct buffer_head 3083 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ 3084 * @wait: wait until reading finish 3085 * 3086 * Returns zero on success or don't wait, and -EIO on error. 
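 * (That is: -EIO can only be returned when @wait is true and the read
 * failed; with @wait false the read completes asynchronously and zero is
 * returned without checking the result.)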
3087  */
3088 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3089 {
3090 	int ret = 0;
3091 
3092 	BUG_ON(!buffer_locked(bh));
3093 
3094 	get_bh(bh);
3095 	bh->b_end_io = end_buffer_read_sync;
3096 	submit_bh(REQ_OP_READ | op_flags, bh);
3097 	if (wait) {
3098 		wait_on_buffer(bh);
3099 		if (!buffer_uptodate(bh))
3100 			ret = -EIO;
3101 	}
3102 	return ret;
3103 }
3104 EXPORT_SYMBOL(__bh_read);
3105 
3106 /**
3107  * __bh_read_batch - Submit read for a batch of unlocked buffers
3108  * @nr: number of buffers in the batch
3109  * @bhs: a batch of struct buffer_head
3110  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3111  * @force_lock: if set, wait for the lock on each buffer; otherwise skip any
3112  *              buffer that cannot be locked without blocking.
3113  *
3114  * The reads complete asynchronously; callers must wait on each buffer themselves.
3115  */
3116 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3117 		     blk_opf_t op_flags, bool force_lock)
3118 {
3119 	int i;
3120 
3121 	for (i = 0; i < nr; i++) {
3122 		struct buffer_head *bh = bhs[i];
3123 
3124 		if (buffer_uptodate(bh))
3125 			continue;
3126 
3127 		if (force_lock)
3128 			lock_buffer(bh);
3129 		else
3130 			if (!trylock_buffer(bh))
3131 				continue;
3132 
3133 		if (buffer_uptodate(bh)) {
3134 			unlock_buffer(bh);
3135 			continue;
3136 		}
3137 
3138 		bh->b_end_io = end_buffer_read_sync;
3139 		get_bh(bh);
3140 		submit_bh(REQ_OP_READ | op_flags, bh);
3141 	}
3142 }
3143 EXPORT_SYMBOL(__bh_read_batch);
3144 
3145 void __init buffer_init(void)
3146 {
3147 	unsigned long nrpages;
3148 	int ret;
3149 
3150 	bh_cachep = kmem_cache_create("buffer_head",
3151 			sizeof(struct buffer_head), 0,
3152 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3153 				SLAB_MEM_SPREAD),
3154 				NULL);
3155 
3156 	/*
3157 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3158 	 */
3159 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3160 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3161 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3162 					NULL, buffer_exit_cpu_dead);
3163 	WARN_ON(ret < 0);
3164 }
3165 
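
/*
 * Illustrative sketch (not part of this file): the buffer-head helpers above
 * combine into the classic synchronous metadata update pattern, here for a
 * hypothetical on-disk structure in block blk of bdev:
 *
 *	struct buffer_head *bh = __bread(bdev, blk, blocksize);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + offset_in_block, src, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 *	return err;
 */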