// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
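
/*
 * Illustrative sketch (not part of this file's API): the usual pattern for
 * modifying a buffer's contents under the buffer lock, assuming the caller
 * already holds a reference on @bh:
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);	// mutate the buffer data
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *
 * unlock_buffer() wakes any waiters sleeping in __wait_on_buffer() below.
 */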

/*
 * Returns whether the folio has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale. If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
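
/*
 * Illustrative sketch: a caller that has submitted read I/O on @bh and
 * wants to wait for it and check the result typically does (this exact
 * pattern appears in osync_buffers_list() below):
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */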

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	/*
	 * Folio lock protects the buffers. Callers that cannot block
	 * will fall back to serializing vs try_to_free_buffers() via
	 * the i_private_lock.
	 */
	if (atomic)
		spin_lock(&bd_mapping->i_private_lock);
	else
		folio_lock(folio);

	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	/*
	 * Upon a noref migration, the folio lock serializes here;
	 * otherwise bail.
	 */
	if (test_bit_acquire(BH_Migrate, &head->b_state)) {
		WARN_ON(!atomic);
		goto out_unlock;
	}

	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	if (atomic)
		spin_unlock(&bd_mapping->i_private_lock);
	else
		folio_unlock(folio);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
	       /* needed by ext4 */
	       folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held.
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
				  &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
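
/*
 * Illustrative sketch (hypothetical "myfs" filesystem, not part of this
 * file): a simple buffer_head-based filesystem would typically wrap this
 * helper in its ->fsync method:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 * Use generic_buffers_fsync_noflush() instead when the filesystem issues
 * its own device cache flush.
 */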

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh;

	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
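
/*
 * Illustrative sketch: an ext2-style filesystem that has just updated an
 * indirect block @bh on behalf of a regular file @inode would do:
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * so that a later sync_mapping_buffers(inode->i_mapping) from its fsync
 * path writes that indirect block back and waits on it.
 */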

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
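
/*
 * Illustrative sketch (hypothetical "myfs"): wiring block_dirty_folio()
 * into a filesystem's address_space_operations, alongside the other
 * buffer_head-based helpers:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */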

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;
	LIST_HEAD(tmp);

	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}
9881da177e4SLinus Torvalds
blkdev_max_block(struct block_device * bdev,unsigned int size)989bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
990bbec0270SLinus Torvalds {
991bbec0270SLinus Torvalds sector_t retval = ~((sector_t)0);
992b86058f9SChristoph Hellwig loff_t sz = bdev_nr_bytes(bdev);
993bbec0270SLinus Torvalds
994bbec0270SLinus Torvalds if (sz) {
995bbec0270SLinus Torvalds unsigned int sizebits = blksize_bits(size);
996bbec0270SLinus Torvalds retval = (sz >> sizebits);
997bbec0270SLinus Torvalds }
998bbec0270SLinus Torvalds return retval;
999bbec0270SLinus Torvalds }
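
/*
 * Worked example for the shift above (illustrative numbers, not from this
 * file): for an 8 GiB device with 4096-byte blocks, blksize_bits(4096) is
 * 12, so retval = 8589934592 >> 12 = 2097152 addressable blocks.
 */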
1000bbec0270SLinus Torvalds
10011da177e4SLinus Torvalds /*
10026f24ce6bSMatthew Wilcox (Oracle) * Initialise the state of a blockdev folio's buffers.
10031da177e4SLinus Torvalds */
10046f24ce6bSMatthew Wilcox (Oracle) static sector_t folio_init_buffers(struct folio *folio,
1005382497adSMatthew Wilcox (Oracle) struct block_device *bdev, unsigned size)
10061da177e4SLinus Torvalds {
10076f24ce6bSMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio);
10081da177e4SLinus Torvalds struct buffer_head *bh = head;
10096f24ce6bSMatthew Wilcox (Oracle) bool uptodate = folio_test_uptodate(folio);
1010382497adSMatthew Wilcox (Oracle) sector_t block = div_u64(folio_pos(folio), size);
1011bcd1d063SChristoph Hellwig sector_t end_block = blkdev_max_block(bdev, size);
10121da177e4SLinus Torvalds
10131da177e4SLinus Torvalds do {
10141da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
101501950a34SEric Biggers bh->b_end_io = NULL;
101601950a34SEric Biggers bh->b_private = NULL;
10171da177e4SLinus Torvalds bh->b_bdev = bdev;
10181da177e4SLinus Torvalds bh->b_blocknr = block;
10191da177e4SLinus Torvalds if (uptodate)
10201da177e4SLinus Torvalds set_buffer_uptodate(bh);
1021080399aaSJeff Moyer if (block < end_block)
10221da177e4SLinus Torvalds set_buffer_mapped(bh);
10231da177e4SLinus Torvalds }
10241da177e4SLinus Torvalds block++;
10251da177e4SLinus Torvalds bh = bh->b_this_page;
10261da177e4SLinus Torvalds } while (bh != head);
1027676ce6d5SHugh Dickins
1028676ce6d5SHugh Dickins /*
1029676ce6d5SHugh Dickins * Caller needs to validate requested block against end of device.
1030676ce6d5SHugh Dickins */
1031676ce6d5SHugh Dickins return end_block;
10321da177e4SLinus Torvalds }
10331da177e4SLinus Torvalds
10341da177e4SLinus Torvalds /*
10356d840a18SMatthew Wilcox (Oracle) * Create the page-cache folio that contains the requested block.
10361da177e4SLinus Torvalds *
1037676ce6d5SHugh Dickins * This is used purely for blockdev mappings.
10386d840a18SMatthew Wilcox (Oracle) *
1039bcd30d4cSMatthew Wilcox (Oracle) * Returns false if we have a failure which cannot be cured by retrying
1040bcd30d4cSMatthew Wilcox (Oracle) * without sleeping. Returns true if we succeeded, or the caller should retry.
10411da177e4SLinus Torvalds */
10426d840a18SMatthew Wilcox (Oracle) static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1043382497adSMatthew Wilcox (Oracle) pgoff_t index, unsigned size, gfp_t gfp)
10441da177e4SLinus Torvalds {
104522f89a4fSAl Viro struct address_space *mapping = bdev->bd_mapping;
10463c98a41cSMatthew Wilcox (Oracle) struct folio *folio;
10471da177e4SLinus Torvalds struct buffer_head *bh;
10486d840a18SMatthew Wilcox (Oracle) sector_t end_block = 0;
104984235de3SJohannes Weiner
105022f89a4fSAl Viro folio = __filemap_get_folio(mapping, index,
10513ed65f04SMatthew Wilcox (Oracle) FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
10523ed65f04SMatthew Wilcox (Oracle) if (IS_ERR(folio))
10536d840a18SMatthew Wilcox (Oracle) return false;
10541da177e4SLinus Torvalds
10553c98a41cSMatthew Wilcox (Oracle) bh = folio_buffers(folio);
10563c98a41cSMatthew Wilcox (Oracle) if (bh) {
10571da177e4SLinus Torvalds if (bh->b_size == size) {
1058382497adSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev, size);
10596d840a18SMatthew Wilcox (Oracle) goto unlock;
10601da177e4SLinus Torvalds }
10611da177e4SLinus Torvalds
1062bcd30d4cSMatthew Wilcox (Oracle) /*
1063bcd30d4cSMatthew Wilcox (Oracle) * Retrying may succeed; for example the folio may finish
1064bcd30d4cSMatthew Wilcox (Oracle) * writeback, or buffers may be cleaned. This should not
1065bcd30d4cSMatthew Wilcox (Oracle) * happen very often; maybe we have old buffers attached to
1066bcd30d4cSMatthew Wilcox (Oracle) * this blockdev's page cache and we're trying to change
1067bcd30d4cSMatthew Wilcox (Oracle) * the block size?
1068bcd30d4cSMatthew Wilcox (Oracle) */
1069bcd30d4cSMatthew Wilcox (Oracle) if (!try_to_free_buffers(folio)) {
10706d840a18SMatthew Wilcox (Oracle) end_block = ~0ULL;
10716d840a18SMatthew Wilcox (Oracle) goto unlock;
10726d840a18SMatthew Wilcox (Oracle) }
1073bcd30d4cSMatthew Wilcox (Oracle) }
10746d840a18SMatthew Wilcox (Oracle)
10753ed65f04SMatthew Wilcox (Oracle) bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
10763ed65f04SMatthew Wilcox (Oracle) if (!bh)
10776d840a18SMatthew Wilcox (Oracle) goto unlock;
10781da177e4SLinus Torvalds
10791da177e4SLinus Torvalds /*
10803c98a41cSMatthew Wilcox (Oracle) * Link the folio to the buffers and initialise them. Take the
10811da177e4SLinus Torvalds * lock to be atomic wrt __find_get_block(), which does not
10823c98a41cSMatthew Wilcox (Oracle) * run under the folio lock.
10831da177e4SLinus Torvalds */
108422f89a4fSAl Viro spin_lock(&mapping->i_private_lock);
108508d84addSMatthew Wilcox (Oracle) link_dev_buffers(folio, bh);
1086382497adSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev, size);
108722f89a4fSAl Viro spin_unlock(&mapping->i_private_lock);
10886d840a18SMatthew Wilcox (Oracle) unlock:
10893c98a41cSMatthew Wilcox (Oracle) folio_unlock(folio);
10903c98a41cSMatthew Wilcox (Oracle) folio_put(folio);
10916d840a18SMatthew Wilcox (Oracle) return block < end_block;
10921da177e4SLinus Torvalds }
10931da177e4SLinus Torvalds
10941da177e4SLinus Torvalds /*
10956d840a18SMatthew Wilcox (Oracle) * Create buffers for the specified block device block's folio. If
10966d840a18SMatthew Wilcox (Oracle) * that folio was dirty, the buffers are set dirty also. Returns false
10976d840a18SMatthew Wilcox (Oracle) * if we've hit a permanent error.
10981da177e4SLinus Torvalds */
10996d840a18SMatthew Wilcox (Oracle) static bool grow_buffers(struct block_device *bdev, sector_t block,
11006d840a18SMatthew Wilcox (Oracle) unsigned size, gfp_t gfp)
11011da177e4SLinus Torvalds {
11025f3bd90dSMatthew Wilcox (Oracle) loff_t pos;
11031da177e4SLinus Torvalds
1104e5657933SAndrew Morton /*
11055f3bd90dSMatthew Wilcox (Oracle) * Check for a block which lies outside our maximum possible
11065f3bd90dSMatthew Wilcox (Oracle) * pagecache index.
1107e5657933SAndrew Morton */
11085f3bd90dSMatthew Wilcox (Oracle) if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
11095f3bd90dSMatthew Wilcox (Oracle) printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
11108e24eea7SHarvey Harrison __func__, (unsigned long long)block,
1111a1c6f057SDmitry Monakhov bdev);
11126d840a18SMatthew Wilcox (Oracle) return false;
1113e5657933SAndrew Morton }
1114676ce6d5SHugh Dickins
11156d840a18SMatthew Wilcox (Oracle) /* Create a folio with the proper size buffers */
11165f3bd90dSMatthew Wilcox (Oracle) return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
11171da177e4SLinus Torvalds }
11181da177e4SLinus Torvalds
11190026ba40SEric Biggers static struct buffer_head *
11203b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
11213b5e6454SGioh Kim unsigned size, gfp_t gfp)
11221da177e4SLinus Torvalds {
1123fb27226cSDavidlohr Bueso bool blocking = gfpflags_allow_blocking(gfp);
1124fb27226cSDavidlohr Bueso
11251da177e4SLinus Torvalds /* Size must be a multiple of the hard sector size */
1126e1defc4fSMartin K. Petersen if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
11271da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) {
11281da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11291da177e4SLinus Torvalds size);
1130e1defc4fSMartin K. Petersen printk(KERN_ERR "logical block size: %d\n",
1131e1defc4fSMartin K. Petersen bdev_logical_block_size(bdev));
11321da177e4SLinus Torvalds
11331da177e4SLinus Torvalds dump_stack();
11341da177e4SLinus Torvalds return NULL;
11351da177e4SLinus Torvalds }
11361da177e4SLinus Torvalds
1137676ce6d5SHugh Dickins for (;;) {
1138676ce6d5SHugh Dickins struct buffer_head *bh;
1139676ce6d5SHugh Dickins
114098a6ca16SDavidlohr Bueso if (!grow_buffers(bdev, block, size, gfp))
114198a6ca16SDavidlohr Bueso return NULL;
114298a6ca16SDavidlohr Bueso
1143fb27226cSDavidlohr Bueso if (blocking)
1144fb27226cSDavidlohr Bueso bh = __find_get_block_nonatomic(bdev, block, size);
1145fb27226cSDavidlohr Bueso else
11461da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size);
11471da177e4SLinus Torvalds if (bh)
11481da177e4SLinus Torvalds return bh;
1149676ce6d5SHugh Dickins }
11501da177e4SLinus Torvalds }
11511da177e4SLinus Torvalds
11521da177e4SLinus Torvalds /*
11531da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages:
11541da177e4SLinus Torvalds *
11551da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1156ec82e1c1SMatthew Wilcox * the page is tagged dirty in the page cache.
11571da177e4SLinus Torvalds *
11581da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of
11591da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is
11601da177e4SLinus Torvalds * merely a hint about the true dirty state.
11611da177e4SLinus Torvalds *
11621da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty
11631da177e4SLinus Torvalds * (if the page has buffers).
11641da177e4SLinus Torvalds *
11651da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other
11661da177e4SLinus Torvalds * buffers are not.
11671da177e4SLinus Torvalds *
11681da177e4SLinus Torvalds * Also, when blockdev buffers are explicitly read with bread(), they
11691da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not
11701da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent
11712c69e205SMatthew Wilcox (Oracle) * block_read_full_folio() against that folio will discover all the uptodate
11722c69e205SMatthew Wilcox (Oracle) * buffers, will set the folio uptodate and will perform no I/O.
11731da177e4SLinus Torvalds */
11741da177e4SLinus Torvalds
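/*
 * A minimal sketch of the cycle described above (illustrative only: the
 * helper name and error handling are assumptions, not part of this file).
 * Reading with sb_bread() makes the buffer uptodate; dirtying it via
 * mark_buffer_dirty() dirties the folio and the backing inode as well.
 */
static int example_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);	/* bh is uptodate */

	if (!bh)
		return -EIO;
	memset(bh->b_data, 0, bh->b_size);	/* modify the data in memory */
	mark_buffer_dirty(bh);	/* buffer, folio and inode are now dirty */
	brelse(bh);	/* writeback will write the buffer out later */
	return 0;
}
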
11751da177e4SLinus Torvalds /**
11761da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout
117767be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty
11781da177e4SLinus Torvalds *
1179ec82e1c1SMatthew Wilcox * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1180ec82e1c1SMatthew Wilcox * its backing page dirty, then tag the page as dirty in the page cache
1181ec82e1c1SMatthew Wilcox * and then attach the address_space's inode to its superblock's dirty
11821da177e4SLinus Torvalds * inode list.
11831da177e4SLinus Torvalds *
1184600f111eSMatthew Wilcox (Oracle) * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
1185b93b0163SMatthew Wilcox * i_pages lock and mapping->host->i_lock.
11861da177e4SLinus Torvalds */
1187fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11881da177e4SLinus Torvalds {
1189787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh));
11901be62dc1SLinus Torvalds
11915305cb83STejun Heo trace_block_dirty_buffer(bh);
11925305cb83STejun Heo
11931be62dc1SLinus Torvalds /*
11941be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case.
11951be62dc1SLinus Torvalds *
11961be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we
11971be62dc1SLinus Torvalds * perhaps modified the buffer.
11981be62dc1SLinus Torvalds */
11991be62dc1SLinus Torvalds if (buffer_dirty(bh)) {
12001be62dc1SLinus Torvalds smp_mb();
12011be62dc1SLinus Torvalds if (buffer_dirty(bh))
12021be62dc1SLinus Torvalds return;
12031be62dc1SLinus Torvalds }
12041be62dc1SLinus Torvalds
1205a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) {
1206cf1d3417SMatthew Wilcox (Oracle) struct folio *folio = bh->b_folio;
1207c4843a75SGreg Thelen struct address_space *mapping = NULL;
1208c4843a75SGreg Thelen
1209cf1d3417SMatthew Wilcox (Oracle) if (!folio_test_set_dirty(folio)) {
1210cf1d3417SMatthew Wilcox (Oracle) mapping = folio->mapping;
12118e9d78edSLinus Torvalds if (mapping)
1212cf1d3417SMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 0);
12138e9d78edSLinus Torvalds }
1214c4843a75SGreg Thelen if (mapping)
1215c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1216a8e7d49aSLinus Torvalds }
12171da177e4SLinus Torvalds }
12181fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
12191da177e4SLinus Torvalds
122087354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
122187354e5dSJeff Layton {
122287354e5dSJeff Layton set_buffer_write_io_error(bh);
122387354e5dSJeff Layton /* FIXME: do we need to set this in both places? */
1224abc8a8a2SMatthew Wilcox (Oracle) if (bh->b_folio && bh->b_folio->mapping)
1225abc8a8a2SMatthew Wilcox (Oracle) mapping_set_error(bh->b_folio->mapping, -EIO);
122604679f3cSJeremy Bongio if (bh->b_assoc_map)
122787354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO);
122887354e5dSJeff Layton }
122987354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
123087354e5dSJeff Layton
123166924fdaSMatthew Wilcox (Oracle) /**
123266924fdaSMatthew Wilcox (Oracle) * __brelse - Release a buffer.
123366924fdaSMatthew Wilcox (Oracle) * @bh: The buffer to release.
123466924fdaSMatthew Wilcox (Oracle) *
123566924fdaSMatthew Wilcox (Oracle) * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
12361da177e4SLinus Torvalds */
123766924fdaSMatthew Wilcox (Oracle) void __brelse(struct buffer_head *bh)
12381da177e4SLinus Torvalds {
123966924fdaSMatthew Wilcox (Oracle) if (atomic_read(&bh->b_count)) {
124066924fdaSMatthew Wilcox (Oracle) put_bh(bh);
12411da177e4SLinus Torvalds return;
12421da177e4SLinus Torvalds }
12435c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12441da177e4SLinus Torvalds }
12451fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
12461da177e4SLinus Torvalds
1247b73a936fSMatthew Wilcox (Oracle) /**
1248b73a936fSMatthew Wilcox (Oracle) * __bforget - Discard any dirty data in a buffer.
1249b73a936fSMatthew Wilcox (Oracle) * @bh: The buffer to forget.
1250b73a936fSMatthew Wilcox (Oracle) *
1251b73a936fSMatthew Wilcox (Oracle) * This variant of bforget() can be called if @bh is guaranteed to not
1252b73a936fSMatthew Wilcox (Oracle) * be NULL.
12531da177e4SLinus Torvalds */
12541da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12551da177e4SLinus Torvalds {
12561da177e4SLinus Torvalds clear_buffer_dirty(bh);
1257535ee2fbSJan Kara if (bh->b_assoc_map) {
1258abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping;
12591da177e4SLinus Torvalds
1260600f111eSMatthew Wilcox (Oracle) spin_lock(&buffer_mapping->i_private_lock);
12611da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers);
126258ff407bSJan Kara bh->b_assoc_map = NULL;
1263600f111eSMatthew Wilcox (Oracle) spin_unlock(&buffer_mapping->i_private_lock);
12641da177e4SLinus Torvalds }
12651da177e4SLinus Torvalds __brelse(bh);
12661da177e4SLinus Torvalds }
12671fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
12681da177e4SLinus Torvalds
12691da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12701da177e4SLinus Torvalds {
12711da177e4SLinus Torvalds lock_buffer(bh);
12721da177e4SLinus Torvalds if (buffer_uptodate(bh)) {
12731da177e4SLinus Torvalds unlock_buffer(bh);
12741da177e4SLinus Torvalds return bh;
12751da177e4SLinus Torvalds } else {
12761da177e4SLinus Torvalds get_bh(bh);
12771da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync;
12781420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh);
12791da177e4SLinus Torvalds wait_on_buffer(bh);
12801da177e4SLinus Torvalds if (buffer_uptodate(bh))
12811da177e4SLinus Torvalds return bh;
12821da177e4SLinus Torvalds }
12831da177e4SLinus Torvalds brelse(bh);
12841da177e4SLinus Torvalds return NULL;
12851da177e4SLinus Torvalds }
12861da177e4SLinus Torvalds
12871da177e4SLinus Torvalds /*
12881da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
12891da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
12901da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear
12911da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple
12921da177e4SLinus Torvalds * CPU's LRUs at the same time.
12931da177e4SLinus Torvalds *
12941da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12951da177e4SLinus Torvalds * sb_find_get_block().
12961da177e4SLinus Torvalds *
12971da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use
12981da177e4SLinus Torvalds * a local interrupt disable for that.
12991da177e4SLinus Torvalds */
13001da177e4SLinus Torvalds
130186cf78d7SSebastien Buisson #define BH_LRU_SIZE 16
13021da177e4SLinus Torvalds
13031da177e4SLinus Torvalds struct bh_lru {
13041da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE];
13051da177e4SLinus Torvalds };
13061da177e4SLinus Torvalds
13071da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
13081da177e4SLinus Torvalds
13091da177e4SLinus Torvalds #ifdef CONFIG_SMP
13101da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable()
13111da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable()
13121da177e4SLinus Torvalds #else
13131da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable()
13141da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable()
13151da177e4SLinus Torvalds #endif
13161da177e4SLinus Torvalds
13171da177e4SLinus Torvalds static inline void check_irqs_on(void)
13181da177e4SLinus Torvalds {
13191da177e4SLinus Torvalds #ifdef irqs_disabled
13201da177e4SLinus Torvalds BUG_ON(irqs_disabled());
13211da177e4SLinus Torvalds #endif
13221da177e4SLinus Torvalds }
13231da177e4SLinus Torvalds
13241da177e4SLinus Torvalds /*
1325241f01fbSEric Biggers * Install a buffer_head into this cpu's LRU. If it is not already in the
1326241f01fbSEric Biggers * LRU, it is inserted at the front and the buffer_head at the back, if any,
1327241f01fbSEric Biggers * is evicted. If it is already in the LRU, it is moved to the front.
13281da177e4SLinus Torvalds */
13291da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13301da177e4SLinus Torvalds {
1331241f01fbSEric Biggers struct buffer_head *evictee = bh;
1332241f01fbSEric Biggers struct bh_lru *b;
1333241f01fbSEric Biggers int i;
13341da177e4SLinus Torvalds
13351da177e4SLinus Torvalds check_irqs_on();
1336c0226eb8SMinchan Kim bh_lru_lock();
1337c0226eb8SMinchan Kim
13388cc621d2SMinchan Kim /*
13398cc621d2SMinchan Kim * The refcount of a buffer_head in the bh_lru prevents the attached
13408cc621d2SMinchan Kim * folio from being dropped (i.e., by try_to_free_buffers), which can
13418cc621d2SMinchan Kim * make page migration fail.
13428cc621d2SMinchan Kim * Skip putting the upcoming bh into the bh_lru until migration is done.
13438cc621d2SMinchan Kim */
13448a237adfSMarcelo Tosatti if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1345c0226eb8SMinchan Kim bh_lru_unlock();
13468cc621d2SMinchan Kim return;
1347c0226eb8SMinchan Kim }
1348241f01fbSEric Biggers
1349241f01fbSEric Biggers b = this_cpu_ptr(&bh_lrus);
1350241f01fbSEric Biggers for (i = 0; i < BH_LRU_SIZE; i++) {
1351241f01fbSEric Biggers swap(evictee, b->bhs[i]);
1352241f01fbSEric Biggers if (evictee == bh) {
1353241f01fbSEric Biggers bh_lru_unlock();
1354241f01fbSEric Biggers return;
1355241f01fbSEric Biggers }
1356241f01fbSEric Biggers }
13571da177e4SLinus Torvalds
13581da177e4SLinus Torvalds get_bh(bh);
13591da177e4SLinus Torvalds bh_lru_unlock();
1360241f01fbSEric Biggers brelse(evictee);
13611da177e4SLinus Torvalds }
13621da177e4SLinus Torvalds
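/*
 * A standalone sketch of the swap-based MRU insert used above, on a plain
 * array of ints (illustrative only; mru_insert() is not a kernel API).
 * Each iteration pushes one entry a slot towards the back; meeting the
 * inserted value again means it was already present, so we stop without
 * creating a duplicate.
 */
static void mru_insert(int *slots, int nr_slots, int val)
{
	int evictee = val;
	int i;

	for (i = 0; i < nr_slots; i++) {
		swap(evictee, slots[i]);
		if (evictee == val)
			return;	/* val was already in the array */
	}
	/* evictee now holds the entry that fell off the back */
}
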
13631da177e4SLinus Torvalds /*
13641da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head.
13651da177e4SLinus Torvalds */
1366858119e1SArjan van de Ven static struct buffer_head *
13673991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13681da177e4SLinus Torvalds {
13691da177e4SLinus Torvalds struct buffer_head *ret = NULL;
13703991d3bdSTomasz Kvarsin unsigned int i;
13711da177e4SLinus Torvalds
13721da177e4SLinus Torvalds check_irqs_on();
13731da177e4SLinus Torvalds bh_lru_lock();
13748a237adfSMarcelo Tosatti if (cpu_is_isolated(smp_processor_id())) {
13758a237adfSMarcelo Tosatti bh_lru_unlock();
13768a237adfSMarcelo Tosatti return NULL;
13778a237adfSMarcelo Tosatti }
13781da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) {
1379c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13801da177e4SLinus Torvalds
13819470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
13829470dd5dSZach Brown bh->b_size == size) {
13831da177e4SLinus Torvalds if (i) {
13841da177e4SLinus Torvalds while (i) {
1385c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i],
1386c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1]));
13871da177e4SLinus Torvalds i--;
13881da177e4SLinus Torvalds }
1389c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh);
13901da177e4SLinus Torvalds }
13911da177e4SLinus Torvalds get_bh(bh);
13921da177e4SLinus Torvalds ret = bh;
13931da177e4SLinus Torvalds break;
13941da177e4SLinus Torvalds }
13951da177e4SLinus Torvalds }
13961da177e4SLinus Torvalds bh_lru_unlock();
13971da177e4SLinus Torvalds return ret;
13981da177e4SLinus Torvalds }
13991da177e4SLinus Torvalds
14001da177e4SLinus Torvalds /*
14011da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh
14021da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return
14032d900effSDavidlohr Bueso * NULL. Atomic context callers may also get NULL if the buffer is being
14042d900effSDavidlohr Bueso * migrated; in that case the page is not marked accessed either.
14051da177e4SLinus Torvalds */
14067ffe3de5SDavidlohr Bueso static struct buffer_head *
14077ffe3de5SDavidlohr Bueso find_get_block_common(struct block_device *bdev, sector_t block,
14087ffe3de5SDavidlohr Bueso unsigned size, bool atomic)
14091da177e4SLinus Torvalds {
14101da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
14111da177e4SLinus Torvalds
14121da177e4SLinus Torvalds if (bh == NULL) {
14132457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */
14147ffe3de5SDavidlohr Bueso bh = __find_get_block_slow(bdev, block, atomic);
14151da177e4SLinus Torvalds if (bh)
14161da177e4SLinus Torvalds bh_lru_install(bh);
14172457aec6SMel Gorman } else
14181da177e4SLinus Torvalds touch_buffer(bh);
14192457aec6SMel Gorman
14201da177e4SLinus Torvalds return bh;
14211da177e4SLinus Torvalds }
14227ffe3de5SDavidlohr Bueso
14237ffe3de5SDavidlohr Bueso struct buffer_head *
14247ffe3de5SDavidlohr Bueso __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
14257ffe3de5SDavidlohr Bueso {
14267ffe3de5SDavidlohr Bueso return find_get_block_common(bdev, block, size, true);
14277ffe3de5SDavidlohr Bueso }
14281da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
14291da177e4SLinus Torvalds
14302814a7d3SDavidlohr Bueso /* Same as __find_get_block(), but for contexts that are allowed to sleep. */
14312814a7d3SDavidlohr Bueso struct buffer_head *
14322814a7d3SDavidlohr Bueso __find_get_block_nonatomic(struct block_device *bdev, sector_t block,
14332814a7d3SDavidlohr Bueso unsigned size)
14342814a7d3SDavidlohr Bueso {
14352814a7d3SDavidlohr Bueso return find_get_block_common(bdev, block, size, false);
14362814a7d3SDavidlohr Bueso }
14372814a7d3SDavidlohr Bueso EXPORT_SYMBOL(__find_get_block_nonatomic);
14382814a7d3SDavidlohr Bueso
14393ed65f04SMatthew Wilcox (Oracle) /**
14403ed65f04SMatthew Wilcox (Oracle) * bdev_getblk - Get a buffer_head in a block device's buffer cache.
14413ed65f04SMatthew Wilcox (Oracle) * @bdev: The block device.
14423ed65f04SMatthew Wilcox (Oracle) * @block: The block number.
14433ed65f04SMatthew Wilcox (Oracle) * @size: The size of buffer_heads for this @bdev.
14443ed65f04SMatthew Wilcox (Oracle) * @gfp: The memory allocation flags to use.
14453ed65f04SMatthew Wilcox (Oracle) *
14460b116ff4SMatthew Wilcox (Oracle) * The returned buffer head has its reference count incremented, but is
14470b116ff4SMatthew Wilcox (Oracle) * not locked. The caller should call brelse() when it has finished
14480b116ff4SMatthew Wilcox (Oracle) * with the buffer. The buffer may not be uptodate. If needed, the
14490b116ff4SMatthew Wilcox (Oracle) * caller can bring it uptodate either by reading it or overwriting it.
14500b116ff4SMatthew Wilcox (Oracle) *
14513ed65f04SMatthew Wilcox (Oracle) * Return: The buffer head, or NULL if memory could not be allocated.
14523ed65f04SMatthew Wilcox (Oracle) */
14533ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
14543ed65f04SMatthew Wilcox (Oracle) unsigned size, gfp_t gfp)
14553ed65f04SMatthew Wilcox (Oracle) {
14565b67d439SDavidlohr Bueso struct buffer_head *bh;
14575b67d439SDavidlohr Bueso
14585b67d439SDavidlohr Bueso if (gfpflags_allow_blocking(gfp))
14595b67d439SDavidlohr Bueso bh = __find_get_block_nonatomic(bdev, block, size);
14605b67d439SDavidlohr Bueso else
14615b67d439SDavidlohr Bueso bh = __find_get_block(bdev, block, size);
14623ed65f04SMatthew Wilcox (Oracle)
14633ed65f04SMatthew Wilcox (Oracle) might_alloc(gfp);
14643ed65f04SMatthew Wilcox (Oracle) if (bh)
14653ed65f04SMatthew Wilcox (Oracle) return bh;
14663ed65f04SMatthew Wilcox (Oracle)
14673ed65f04SMatthew Wilcox (Oracle) return __getblk_slow(bdev, block, size, gfp);
14683ed65f04SMatthew Wilcox (Oracle) }
14693ed65f04SMatthew Wilcox (Oracle) EXPORT_SYMBOL(bdev_getblk);
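
/*
 * Typical use (an illustrative sketch; the 4096-byte size assumes it is a
 * multiple of the device's logical block size and no larger than
 * PAGE_SIZE): look up or create the buffer without doing any I/O.
 */
static void example_getblk(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh;

	bh = bdev_getblk(bdev, block, 4096, GFP_NOFS | __GFP_MOVABLE);
	if (!bh)
		return;	/* allocation failed */
	/* bh may not be uptodate: read it or overwrite it before use */
	brelse(bh);
}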
14703ed65f04SMatthew Wilcox (Oracle)
14711da177e4SLinus Torvalds /*
14721da177e4SLinus Torvalds * Do async read-ahead on a buffer..
14731da177e4SLinus Torvalds */
14743991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14751da177e4SLinus Torvalds {
1476775d9b10SMatthew Wilcox (Oracle) struct buffer_head *bh = bdev_getblk(bdev, block, size,
1477775d9b10SMatthew Wilcox (Oracle) GFP_NOWAIT | __GFP_MOVABLE);
1478775d9b10SMatthew Wilcox (Oracle)
1479a3e713b5SAndrew Morton if (likely(bh)) {
1480e7ea1129SZhang Yi bh_readahead(bh, REQ_RAHEAD);
14811da177e4SLinus Torvalds brelse(bh);
14821da177e4SLinus Torvalds }
1483a3e713b5SAndrew Morton }
14841da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14851da177e4SLinus Torvalds
14861da177e4SLinus Torvalds /**
1487324ecaeeSMatthew Wilcox (Oracle) * __bread_gfp() - Read a block.
1488324ecaeeSMatthew Wilcox (Oracle) * @bdev: The block device to read from.
1489324ecaeeSMatthew Wilcox (Oracle) * @block: Block number in units of block size.
1490324ecaeeSMatthew Wilcox (Oracle) * @size: The block size of this device in bytes.
1491324ecaeeSMatthew Wilcox (Oracle) * @gfp: Not page allocation flags; see below.
14921da177e4SLinus Torvalds *
1493324ecaeeSMatthew Wilcox (Oracle) * You are not expected to call this function. You should use one of
1494324ecaeeSMatthew Wilcox (Oracle) * sb_bread(), sb_bread_unmovable() or __bread().
1495324ecaeeSMatthew Wilcox (Oracle) *
1496324ecaeeSMatthew Wilcox (Oracle) * Read a specified block, and return the buffer head that refers to it.
1497324ecaeeSMatthew Wilcox (Oracle) * If @gfp is 0, the memory will be allocated using the block device's
1498324ecaeeSMatthew Wilcox (Oracle) * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
1499324ecaeeSMatthew Wilcox (Oracle) * allocated from a movable area. Do not pass in a complete set of
1500324ecaeeSMatthew Wilcox (Oracle) * GFP flags.
1501324ecaeeSMatthew Wilcox (Oracle) *
1502324ecaeeSMatthew Wilcox (Oracle) * The returned buffer head has its refcount increased. The caller should
1503324ecaeeSMatthew Wilcox (Oracle) * call brelse() when it has finished with the buffer.
1504324ecaeeSMatthew Wilcox (Oracle) *
1505324ecaeeSMatthew Wilcox (Oracle) * Context: May sleep waiting for I/O.
1506324ecaeeSMatthew Wilcox (Oracle) * Return: NULL if the block was unreadable.
15071da177e4SLinus Torvalds */
1508324ecaeeSMatthew Wilcox (Oracle) struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
15093b5e6454SGioh Kim unsigned size, gfp_t gfp)
15101da177e4SLinus Torvalds {
151193b13ecaSMatthew Wilcox (Oracle) struct buffer_head *bh;
151293b13ecaSMatthew Wilcox (Oracle)
1513224941e8SAl Viro gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
151493b13ecaSMatthew Wilcox (Oracle)
151593b13ecaSMatthew Wilcox (Oracle) /*
151693b13ecaSMatthew Wilcox (Oracle) * Prefer looping in the allocator rather than here, at least that
151793b13ecaSMatthew Wilcox (Oracle) * code knows what it's doing.
151893b13ecaSMatthew Wilcox (Oracle) */
151993b13ecaSMatthew Wilcox (Oracle) gfp |= __GFP_NOFAIL;
152093b13ecaSMatthew Wilcox (Oracle)
152193b13ecaSMatthew Wilcox (Oracle) bh = bdev_getblk(bdev, block, size, gfp);
15221da177e4SLinus Torvalds
1523a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh))
15241da177e4SLinus Torvalds bh = __bread_slow(bh);
15251da177e4SLinus Torvalds return bh;
15261da177e4SLinus Torvalds }
15273b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
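
/*
 * Sketch of direct use (illustrative; most callers go through sb_bread(),
 * and the 512-byte size is an assumption): __bread() is __bread_gfp()
 * with __GFP_MOVABLE.
 */
static int example_direct_bread(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __bread(bdev, block, 512);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* bh->b_data holds 512 bytes of uptodate data here */
	brelse(bh);
	return 0;
}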
15281da177e4SLinus Torvalds
15298cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
15308cc621d2SMinchan Kim {
15318cc621d2SMinchan Kim int i;
15328cc621d2SMinchan Kim
15338cc621d2SMinchan Kim for (i = 0; i < BH_LRU_SIZE; i++) {
15348cc621d2SMinchan Kim brelse(b->bhs[i]);
15358cc621d2SMinchan Kim b->bhs[i] = NULL;
15368cc621d2SMinchan Kim }
15378cc621d2SMinchan Kim }
15381da177e4SLinus Torvalds /*
15391da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount.
15401da177e4SLinus Torvalds * This doesn't race because it runs on each cpu either in irq context
15411da177e4SLinus Torvalds * or with preemption disabled.
15421da177e4SLinus Torvalds */
15431da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
15441da177e4SLinus Torvalds {
15451da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus);
15461da177e4SLinus Torvalds
15478cc621d2SMinchan Kim __invalidate_bh_lrus(b);
15481da177e4SLinus Torvalds put_cpu_var(bh_lrus);
15491da177e4SLinus Torvalds }
15501da177e4SLinus Torvalds
15518cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
155242be35d0SGilad Ben-Yossef {
155342be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
155442be35d0SGilad Ben-Yossef int i;
155542be35d0SGilad Ben-Yossef
155642be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) {
155742be35d0SGilad Ben-Yossef if (b->bhs[i])
15581d706679SSaurav Girepunje return true;
155942be35d0SGilad Ben-Yossef }
156042be35d0SGilad Ben-Yossef
15611d706679SSaurav Girepunje return false;
156242be35d0SGilad Ben-Yossef }
156342be35d0SGilad Ben-Yossef
1564f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
15651da177e4SLinus Torvalds {
1566cb923159SSebastian Andrzej Siewior on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
15671da177e4SLinus Torvalds }
15689db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
15691da177e4SLinus Torvalds
1570243418e3SMinchan Kim /*
1571243418e3SMinchan Kim * It's called from workqueue context so we need a bh_lru_lock to close
1572243418e3SMinchan Kim * the race with preemption/irq.
1573243418e3SMinchan Kim */
1574243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
15758cc621d2SMinchan Kim {
15768cc621d2SMinchan Kim struct bh_lru *b;
15778cc621d2SMinchan Kim
15788cc621d2SMinchan Kim bh_lru_lock();
1579243418e3SMinchan Kim b = this_cpu_ptr(&bh_lrus);
15808cc621d2SMinchan Kim __invalidate_bh_lrus(b);
15818cc621d2SMinchan Kim bh_lru_unlock();
15828cc621d2SMinchan Kim }
15838cc621d2SMinchan Kim
1584465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1585465e5e6aSPankaj Raghav unsigned long offset)
1586465e5e6aSPankaj Raghav {
1587465e5e6aSPankaj Raghav bh->b_folio = folio;
1588465e5e6aSPankaj Raghav BUG_ON(offset >= folio_size(folio));
1589465e5e6aSPankaj Raghav if (folio_test_highmem(folio))
1590465e5e6aSPankaj Raghav /*
1591465e5e6aSPankaj Raghav * This catches illegal uses and preserves the offset:
1592465e5e6aSPankaj Raghav */
1593465e5e6aSPankaj Raghav bh->b_data = (char *)(0 + offset);
1594465e5e6aSPankaj Raghav else
1595465e5e6aSPankaj Raghav bh->b_data = folio_address(folio) + offset;
1596465e5e6aSPankaj Raghav }
1597465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh);
1598465e5e6aSPankaj Raghav
15991da177e4SLinus Torvalds /*
16001da177e4SLinus Torvalds * Called when truncating a buffer on a page completely.
16011da177e4SLinus Torvalds */
1602e7470ee8SMel Gorman
1603e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1604e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1605e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1606e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten)
1607e7470ee8SMel Gorman
1608858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
16091da177e4SLinus Torvalds {
1610b0192296SUros Bizjak unsigned long b_state;
1611e7470ee8SMel Gorman
16121da177e4SLinus Torvalds lock_buffer(bh);
16131da177e4SLinus Torvalds clear_buffer_dirty(bh);
16141da177e4SLinus Torvalds bh->b_bdev = NULL;
1615b0192296SUros Bizjak b_state = READ_ONCE(bh->b_state);
1616b0192296SUros Bizjak do {
1617*8e184bf1SDavidlohr Bueso } while (!try_cmpxchg_relaxed(&bh->b_state, &b_state,
1618b0192296SUros Bizjak b_state & ~BUFFER_FLAGS_DISCARD));
16191da177e4SLinus Torvalds unlock_buffer(bh);
16201da177e4SLinus Torvalds }
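
/*
 * The loop above is the usual lock-free read-modify-write pattern; here is
 * a generic sketch (illustrative only; clear_bits_atomically() is not a
 * kernel API). try_cmpxchg() reloads "old" on failure, so the loop simply
 * retries until no other CPU changed the word between read and update.
 */
static void clear_bits_atomically(unsigned long *word, unsigned long mask)
{
	unsigned long old = READ_ONCE(*word);

	do {
	} while (!try_cmpxchg(word, &old, old & ~mask));
}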
16211da177e4SLinus Torvalds
16221da177e4SLinus Torvalds /**
16237ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
16247ba13abbSMatthew Wilcox (Oracle) * @folio: The folio which is affected.
1625d47992f8SLukas Czerner * @offset: start of the range to invalidate
1626d47992f8SLukas Czerner * @length: length of the range to invalidate
16271da177e4SLinus Torvalds *
16287ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() is called when all or part of the folio has been
16291da177e4SLinus Torvalds * invalidated by a truncate operation.
16301da177e4SLinus Torvalds *
16317ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() does not have to release all buffers, but it must
16321da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O
16331da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation
16341da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those
16351da177e4SLinus Torvalds * blocks on-disk.
16361da177e4SLinus Torvalds */
16377ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
16381da177e4SLinus Torvalds {
16391da177e4SLinus Torvalds struct buffer_head *head, *bh, *next;
16407ba13abbSMatthew Wilcox (Oracle) size_t curr_off = 0;
16417ba13abbSMatthew Wilcox (Oracle) size_t stop = length + offset;
16421da177e4SLinus Torvalds
16437ba13abbSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
16441da177e4SLinus Torvalds
1645d47992f8SLukas Czerner /*
1646d47992f8SLukas Czerner * Check for overflow
1647d47992f8SLukas Czerner */
16487ba13abbSMatthew Wilcox (Oracle) BUG_ON(stop > folio_size(folio) || stop < length);
1649d47992f8SLukas Czerner
16507ba13abbSMatthew Wilcox (Oracle) head = folio_buffers(folio);
16517ba13abbSMatthew Wilcox (Oracle) if (!head)
16527ba13abbSMatthew Wilcox (Oracle) return;
16537ba13abbSMatthew Wilcox (Oracle)
16541da177e4SLinus Torvalds bh = head;
16551da177e4SLinus Torvalds do {
16567ba13abbSMatthew Wilcox (Oracle) size_t next_off = curr_off + bh->b_size;
16571da177e4SLinus Torvalds next = bh->b_this_page;
16581da177e4SLinus Torvalds
16591da177e4SLinus Torvalds /*
1660d47992f8SLukas Czerner * Are we still fully in range?
1661d47992f8SLukas Czerner */
1662d47992f8SLukas Czerner if (next_off > stop)
1663d47992f8SLukas Czerner goto out;
1664d47992f8SLukas Czerner
1665d47992f8SLukas Czerner /*
16661da177e4SLinus Torvalds * is this block fully invalidated?
16671da177e4SLinus Torvalds */
16681da177e4SLinus Torvalds if (offset <= curr_off)
16691da177e4SLinus Torvalds discard_buffer(bh);
16701da177e4SLinus Torvalds curr_off = next_off;
16711da177e4SLinus Torvalds bh = next;
16721da177e4SLinus Torvalds } while (bh != head);
16731da177e4SLinus Torvalds
16741da177e4SLinus Torvalds /*
16757ba13abbSMatthew Wilcox (Oracle) * We release buffers only if the entire folio is being invalidated.
16761da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated,
16771da177e4SLinus Torvalds * so real IO is not possible anymore.
16781da177e4SLinus Torvalds */
16797ba13abbSMatthew Wilcox (Oracle) if (length == folio_size(folio))
16807ba13abbSMatthew Wilcox (Oracle) filemap_release_folio(folio, 0);
16811da177e4SLinus Torvalds out:
16829c33d85eSMatthew Wilcox (Oracle) folio_clear_mappedtodisk(folio);
16831da177e4SLinus Torvalds }
16847ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
16851da177e4SLinus Torvalds
16861da177e4SLinus Torvalds /*
16871da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt
1688600f111eSMatthew Wilcox (Oracle) * block_dirty_folio() via i_private_lock. try_to_free_buffers
16898e2e1756SPankaj Raghav * is already excluded via the folio lock.
16901da177e4SLinus Torvalds */
16910a88810dSMatthew Wilcox (Oracle) struct buffer_head *create_empty_buffers(struct folio *folio,
16923decb856SMatthew Wilcox (Oracle) unsigned long blocksize, unsigned long b_state)
16931da177e4SLinus Torvalds {
16941da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail;
16952a418157SMatthew Wilcox (Oracle) gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
16961da177e4SLinus Torvalds
16972a418157SMatthew Wilcox (Oracle) head = folio_alloc_buffers(folio, blocksize, gfp);
16981da177e4SLinus Torvalds bh = head;
16991da177e4SLinus Torvalds do {
17001da177e4SLinus Torvalds bh->b_state |= b_state;
17011da177e4SLinus Torvalds tail = bh;
17021da177e4SLinus Torvalds bh = bh->b_this_page;
17031da177e4SLinus Torvalds } while (bh);
17041da177e4SLinus Torvalds tail->b_this_page = head;
17051da177e4SLinus Torvalds
1706600f111eSMatthew Wilcox (Oracle) spin_lock(&folio->mapping->i_private_lock);
17078e2e1756SPankaj Raghav if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
17081da177e4SLinus Torvalds bh = head;
17091da177e4SLinus Torvalds do {
17108e2e1756SPankaj Raghav if (folio_test_dirty(folio))
17111da177e4SLinus Torvalds set_buffer_dirty(bh);
17128e2e1756SPankaj Raghav if (folio_test_uptodate(folio))
17131da177e4SLinus Torvalds set_buffer_uptodate(bh);
17141da177e4SLinus Torvalds bh = bh->b_this_page;
17151da177e4SLinus Torvalds } while (bh != head);
17161da177e4SLinus Torvalds }
17178e2e1756SPankaj Raghav folio_attach_private(folio, head);
1718600f111eSMatthew Wilcox (Oracle) spin_unlock(&folio->mapping->i_private_lock);
17193decb856SMatthew Wilcox (Oracle)
17203decb856SMatthew Wilcox (Oracle) return head;
17218e2e1756SPankaj Raghav }
17221da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
17231da177e4SLinus Torvalds
172429f3ad7dSJan Kara /**
172529f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers in block device
172629f3ad7dSJan Kara * @bdev: Block device to clean buffers in
172729f3ad7dSJan Kara * @block: Start of a range of blocks to clean
172829f3ad7dSJan Kara * @len: Number of blocks to clean
17291da177e4SLinus Torvalds *
173029f3ad7dSJan Kara * We are taking a range of blocks for data and we don't want writeback of any
173129f3ad7dSJan Kara * buffer-cache aliases from the moment this function returns until the moment
173229f3ad7dSJan Kara * something explicitly marks the buffer dirty (hopefully that will not happen
173329f3ad7dSJan Kara * until we free that block ;-) We don't even need to mark it not-uptodate -
173429f3ad7dSJan Kara * nobody can expect anything from a newly allocated buffer anyway. We used to
173529f3ad7dSJan Kara * use unmap_buffer() for such invalidation, but that was wrong. We definitely
173629f3ad7dSJan Kara * don't want to mark the alias unmapped, for example - it would confuse anyone
173729f3ad7dSJan Kara * who might pick it up with bread() afterwards...
173829f3ad7dSJan Kara *
173929f3ad7dSJan Kara * Also.. Note that bforget() doesn't lock the buffer. So there can be
174029f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that
174129f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really
174229f3ad7dSJan Kara * need to. That happens here.
17431da177e4SLinus Torvalds */
174429f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
17451da177e4SLinus Torvalds {
174653cd4cd3SAl Viro struct address_space *bd_mapping = bdev->bd_mapping;
174753cd4cd3SAl Viro const int blkbits = bd_mapping->host->i_blkbits;
17489e0b6f31SMatthew Wilcox (Oracle) struct folio_batch fbatch;
174953cd4cd3SAl Viro pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
175029f3ad7dSJan Kara pgoff_t end;
1751c10f778dSJan Kara int i, count;
175229f3ad7dSJan Kara struct buffer_head *bh;
175329f3ad7dSJan Kara struct buffer_head *head;
17541da177e4SLinus Torvalds
175553cd4cd3SAl Viro end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
17569e0b6f31SMatthew Wilcox (Oracle) folio_batch_init(&fbatch);
17579e0b6f31SMatthew Wilcox (Oracle) while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
17589e0b6f31SMatthew Wilcox (Oracle) count = folio_batch_count(&fbatch);
1759c10f778dSJan Kara for (i = 0; i < count; i++) {
17609e0b6f31SMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i];
17611da177e4SLinus Torvalds
17629e0b6f31SMatthew Wilcox (Oracle) if (!folio_buffers(folio))
176329f3ad7dSJan Kara continue;
176429f3ad7dSJan Kara /*
1765600f111eSMatthew Wilcox (Oracle) * We use folio lock instead of bd_mapping->i_private_lock
176629f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and
176729f3ad7dSJan Kara * it scales better than a global spinlock lock.
176829f3ad7dSJan Kara */
17699e0b6f31SMatthew Wilcox (Oracle) folio_lock(folio);
17709e0b6f31SMatthew Wilcox (Oracle) /* Recheck when the folio is locked which pins bhs */
17719e0b6f31SMatthew Wilcox (Oracle) head = folio_buffers(folio);
17729e0b6f31SMatthew Wilcox (Oracle) if (!head)
177329f3ad7dSJan Kara goto unlock_page;
177429f3ad7dSJan Kara bh = head;
177529f3ad7dSJan Kara do {
17766c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block))
177729f3ad7dSJan Kara goto next;
177829f3ad7dSJan Kara if (bh->b_blocknr >= block + len)
177929f3ad7dSJan Kara break;
178029f3ad7dSJan Kara clear_buffer_dirty(bh);
178129f3ad7dSJan Kara wait_on_buffer(bh);
178229f3ad7dSJan Kara clear_buffer_req(bh);
178329f3ad7dSJan Kara next:
178429f3ad7dSJan Kara bh = bh->b_this_page;
178529f3ad7dSJan Kara } while (bh != head);
178629f3ad7dSJan Kara unlock_page:
17879e0b6f31SMatthew Wilcox (Oracle) folio_unlock(folio);
178829f3ad7dSJan Kara }
17899e0b6f31SMatthew Wilcox (Oracle) folio_batch_release(&fbatch);
179029f3ad7dSJan Kara cond_resched();
1791c10f778dSJan Kara /* End of range already reached? */
1792c10f778dSJan Kara if (index > end || !index)
1793c10f778dSJan Kara break;
17941da177e4SLinus Torvalds }
17951da177e4SLinus Torvalds }
179629f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
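
/*
 * Sketch of the usual caller pattern (illustrative; compare the
 * buffer_new() handling in __block_write_full_folio() below): after
 * get_block() allocates a fresh block for file data, kill any stale
 * buffer-cache alias for it on the block device.
 */
static void example_handle_new_block(struct buffer_head *bh)
{
	if (buffer_new(bh))
		clean_bdev_bh_alias(bh);	/* wraps clean_bdev_aliases() */
}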
17971da177e4SLinus Torvalds
1798c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio,
1799c6c8c3e7SPankaj Raghav struct inode *inode,
1800c6c8c3e7SPankaj Raghav unsigned int b_state)
180145bce8f3SLinus Torvalds {
18023decb856SMatthew Wilcox (Oracle) struct buffer_head *bh;
18033decb856SMatthew Wilcox (Oracle)
1804c6c8c3e7SPankaj Raghav BUG_ON(!folio_test_locked(folio));
180545bce8f3SLinus Torvalds
18063decb856SMatthew Wilcox (Oracle) bh = folio_buffers(folio);
18073decb856SMatthew Wilcox (Oracle) if (!bh)
18080a88810dSMatthew Wilcox (Oracle) bh = create_empty_buffers(folio,
18093decb856SMatthew Wilcox (Oracle) 1 << READ_ONCE(inode->i_blkbits), b_state);
18103decb856SMatthew Wilcox (Oracle) return bh;
181145bce8f3SLinus Torvalds }
181245bce8f3SLinus Torvalds
181345bce8f3SLinus Torvalds /*
18141da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid:
18151da177e4SLinus Torvalds *
18161da177e4SLinus Torvalds * Mapped Uptodate Meaning
18171da177e4SLinus Torvalds *
18181da177e4SLinus Torvalds * No No "unknown" - must do get_block()
18191da177e4SLinus Torvalds * No Yes "hole" - zero-filled
18201da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in
18211da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory.
18221da177e4SLinus Torvalds *
18231da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate).
18241da177e4SLinus Torvalds */
18251da177e4SLinus Torvalds
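/*
 * The table above, expressed as a predicate check (an illustrative helper,
 * not part of this file's API):
 */
static const char *example_bh_meaning(struct buffer_head *bh)
{
	if (buffer_mapped(bh))
		return buffer_uptodate(bh) ? "valid" : "allocated";
	return buffer_uptodate(bh) ? "hole" : "unknown";	/* needs get_block() */
}
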
18261da177e4SLinus Torvalds /*
182717bf23a9SMatthew Wilcox (Oracle) * While block_write_full_folio is writing back the dirty buffers under
18281da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them
18291da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer
18301da177e4SLinus Torvalds * state inside lock_buffer().
18311da177e4SLinus Torvalds *
183217bf23a9SMatthew Wilcox (Oracle) * If block_write_full_folio() is called for regular writeback
18331da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
18341da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer
18351da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback
18361da177e4SLinus Torvalds * prevents this contention from occurring.
18376e34eeddSTheodore Ts'o *
183817bf23a9SMatthew Wilcox (Oracle) * If block_write_full_folio() is called with wbc->sync_mode ==
183970fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1840721a9602SJens Axboe * causes the writes to be flagged as synchronous writes.
18411da177e4SLinus Torvalds */
184253418a18SMatthew Wilcox (Oracle) int __block_write_full_folio(struct inode *inode, struct folio *folio,
184314059f66SMatthew Wilcox (Oracle) get_block_t *get_block, struct writeback_control *wbc)
18441da177e4SLinus Torvalds {
18451da177e4SLinus Torvalds int err;
18461da177e4SLinus Torvalds sector_t block;
18471da177e4SLinus Torvalds sector_t last_block;
1848f0fbd5fcSAndrew Morton struct buffer_head *bh, *head;
1849fa399c31SMatthew Wilcox (Oracle) size_t blocksize;
18501da177e4SLinus Torvalds int nr_underway = 0;
18513ae72869SBart Van Assche blk_opf_t write_flags = wbc_to_write_flags(wbc);
18521da177e4SLinus Torvalds
185353418a18SMatthew Wilcox (Oracle) head = folio_create_buffers(folio, inode,
18541da177e4SLinus Torvalds (1 << BH_Dirty) | (1 << BH_Uptodate));
18551da177e4SLinus Torvalds
18561da177e4SLinus Torvalds /*
1857e621900aSMatthew Wilcox (Oracle) * Be very careful. We have no exclusion from block_dirty_folio
18581da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at
18591da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it
186053418a18SMatthew Wilcox (Oracle) * then we just miss that fact, and the folio stays dirty.
18611da177e4SLinus Torvalds *
1862e621900aSMatthew Wilcox (Oracle) * Buffers outside i_size may be dirtied by block_dirty_folio;
18631da177e4SLinus Torvalds * handle that here by just cleaning them.
18641da177e4SLinus Torvalds */
18651da177e4SLinus Torvalds
18661da177e4SLinus Torvalds bh = head;
186745bce8f3SLinus Torvalds blocksize = bh->b_size;
186845bce8f3SLinus Torvalds
1869fa399c31SMatthew Wilcox (Oracle) block = div_u64(folio_pos(folio), blocksize);
1870fa399c31SMatthew Wilcox (Oracle) last_block = div_u64(i_size_read(inode) - 1, blocksize);
18711da177e4SLinus Torvalds
18721da177e4SLinus Torvalds /*
18731da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and
18741da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping.
18751da177e4SLinus Torvalds */
18761da177e4SLinus Torvalds do {
18771da177e4SLinus Torvalds if (block > last_block) {
18781da177e4SLinus Torvalds /*
18791da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because
188053418a18SMatthew Wilcox (Oracle) * this folio can be outside i_size when there is a
18811da177e4SLinus Torvalds * truncate in progress.
18821da177e4SLinus Torvalds */
18831da177e4SLinus Torvalds /*
188417bf23a9SMatthew Wilcox (Oracle) * The buffer was zeroed by block_write_full_folio()
18851da177e4SLinus Torvalds */
18861da177e4SLinus Torvalds clear_buffer_dirty(bh);
18871da177e4SLinus Torvalds set_buffer_uptodate(bh);
188829a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
188929a814d2SAlex Tomas buffer_dirty(bh)) {
1890b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
18911da177e4SLinus Torvalds err = get_block(inode, block, bh, 1);
18921da177e4SLinus Torvalds if (err)
18931da177e4SLinus Torvalds goto recover;
189429a814d2SAlex Tomas clear_buffer_delay(bh);
18951da177e4SLinus Torvalds if (buffer_new(bh)) {
18961da177e4SLinus Torvalds /* blockdev mappings never come here */
18971da177e4SLinus Torvalds clear_buffer_new(bh);
1898e64855c6SJan Kara clean_bdev_bh_alias(bh);
18991da177e4SLinus Torvalds }
19001da177e4SLinus Torvalds }
19011da177e4SLinus Torvalds bh = bh->b_this_page;
19021da177e4SLinus Torvalds block++;
19031da177e4SLinus Torvalds } while (bh != head);
19041da177e4SLinus Torvalds
19051da177e4SLinus Torvalds do {
19061da177e4SLinus Torvalds if (!buffer_mapped(bh))
19071da177e4SLinus Torvalds continue;
19081da177e4SLinus Torvalds /*
19091da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot
191053418a18SMatthew Wilcox (Oracle) * lock the buffer then redirty the folio. Note that this can
19115b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads
19125b0830cbSJens Axboe * and kswapd activity, but those code paths have their own
19135b0830cbSJens Axboe * higher-level throttling.
19141da177e4SLinus Torvalds */
19151b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) {
19161da177e4SLinus Torvalds lock_buffer(bh);
1917ca5de404SNick Piggin } else if (!trylock_buffer(bh)) {
191853418a18SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio);
19191da177e4SLinus Torvalds continue;
19201da177e4SLinus Torvalds }
19211da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) {
192214059f66SMatthew Wilcox (Oracle) mark_buffer_async_write_endio(bh,
192314059f66SMatthew Wilcox (Oracle) end_buffer_async_write);
19241da177e4SLinus Torvalds } else {
19251da177e4SLinus Torvalds unlock_buffer(bh);
19261da177e4SLinus Torvalds }
19271da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head);
19281da177e4SLinus Torvalds
19291da177e4SLinus Torvalds /*
193053418a18SMatthew Wilcox (Oracle) * The folio and its buffers are protected by the writeback flag,
193153418a18SMatthew Wilcox (Oracle) * so we can drop the bh refcounts early.
19321da177e4SLinus Torvalds */
193353418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio));
193453418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio);
19351da177e4SLinus Torvalds
19361da177e4SLinus Torvalds do {
19371da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
19381da177e4SLinus Torvalds if (buffer_async_write(bh)) {
193944981351SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
194044981351SBart Van Assche inode->i_write_hint, wbc);
19411da177e4SLinus Torvalds nr_underway++;
1942ad576e63SNick Piggin }
19431da177e4SLinus Torvalds bh = next;
19441da177e4SLinus Torvalds } while (bh != head);
194553418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
19461da177e4SLinus Torvalds
19471da177e4SLinus Torvalds err = 0;
19481da177e4SLinus Torvalds done:
19491da177e4SLinus Torvalds if (nr_underway == 0) {
19501da177e4SLinus Torvalds /*
195153418a18SMatthew Wilcox (Oracle) * The folio was marked dirty, but the buffers were
19521da177e4SLinus Torvalds * clean. Someone wrote them back by hand with
195379f59784SZhang Yi * write_dirty_buffer/submit_bh. A rare case.
19541da177e4SLinus Torvalds */
195553418a18SMatthew Wilcox (Oracle) folio_end_writeback(folio);
19563d67f2d7SNick Piggin
19571da177e4SLinus Torvalds /*
195853418a18SMatthew Wilcox (Oracle) * The folio and buffer_heads can be released at any time from
19591da177e4SLinus Torvalds * here on.
19601da177e4SLinus Torvalds */
19611da177e4SLinus Torvalds }
19621da177e4SLinus Torvalds return err;
19631da177e4SLinus Torvalds
19641da177e4SLinus Torvalds recover:
19651da177e4SLinus Torvalds /*
19661da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some
19671da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid
19681da177e4SLinus Torvalds * exposing stale data.
196953418a18SMatthew Wilcox (Oracle) * The folio is currently locked and not marked for writeback
19701da177e4SLinus Torvalds */
19711da177e4SLinus Torvalds bh = head;
19721da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */
19731da177e4SLinus Torvalds do {
197429a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) &&
197529a814d2SAlex Tomas !buffer_delay(bh)) {
19761da177e4SLinus Torvalds lock_buffer(bh);
197714059f66SMatthew Wilcox (Oracle) mark_buffer_async_write_endio(bh,
197814059f66SMatthew Wilcox (Oracle) end_buffer_async_write);
19791da177e4SLinus Torvalds } else {
19801da177e4SLinus Torvalds /*
19811da177e4SLinus Torvalds * The buffer may have been set dirty during
198253418a18SMatthew Wilcox (Oracle) * attachment to a dirty folio.
19831da177e4SLinus Torvalds */
19841da177e4SLinus Torvalds clear_buffer_dirty(bh);
19851da177e4SLinus Torvalds }
19861da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head);
198753418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio));
198853418a18SMatthew Wilcox (Oracle) mapping_set_error(folio->mapping, err);
198953418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio);
19901da177e4SLinus Torvalds do {
19911da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
19921da177e4SLinus Torvalds if (buffer_async_write(bh)) {
19931da177e4SLinus Torvalds clear_buffer_dirty(bh);
199444981351SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
199544981351SBart Van Assche inode->i_write_hint, wbc);
19961da177e4SLinus Torvalds nr_underway++;
1997ad576e63SNick Piggin }
19981da177e4SLinus Torvalds bh = next;
19991da177e4SLinus Torvalds } while (bh != head);
200053418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
20011da177e4SLinus Torvalds goto done;
20021da177e4SLinus Torvalds }
200353418a18SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__block_write_full_folio);
20041da177e4SLinus Torvalds
2005afddba49SNick Piggin /*
20064a9622f2SMatthew Wilcox (Oracle) * If a folio has any new buffers, zero them out here, and mark them uptodate
2007afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised
2008afddba49SNick Piggin * block data from leaking), and clear the new bit.
2009afddba49SNick Piggin */
20104a9622f2SMatthew Wilcox (Oracle) void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
2011afddba49SNick Piggin {
20124a9622f2SMatthew Wilcox (Oracle) size_t block_start, block_end;
2013afddba49SNick Piggin struct buffer_head *head, *bh;
2014afddba49SNick Piggin
20154a9622f2SMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
20164a9622f2SMatthew Wilcox (Oracle) head = folio_buffers(folio);
20174a9622f2SMatthew Wilcox (Oracle) if (!head)
2018afddba49SNick Piggin return;
2019afddba49SNick Piggin
20204a9622f2SMatthew Wilcox (Oracle) bh = head;
2021afddba49SNick Piggin block_start = 0;
2022afddba49SNick Piggin do {
2023afddba49SNick Piggin block_end = block_start + bh->b_size;
2024afddba49SNick Piggin
2025afddba49SNick Piggin if (buffer_new(bh)) {
2026afddba49SNick Piggin if (block_end > from && block_start < to) {
20274a9622f2SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) {
20284a9622f2SMatthew Wilcox (Oracle) size_t start, xend;
2029afddba49SNick Piggin
2030afddba49SNick Piggin start = max(from, block_start);
20314a9622f2SMatthew Wilcox (Oracle) xend = min(to, block_end);
2032afddba49SNick Piggin
20334a9622f2SMatthew Wilcox (Oracle) folio_zero_segment(folio, start, xend);
2034afddba49SNick Piggin set_buffer_uptodate(bh);
2035afddba49SNick Piggin }
2036afddba49SNick Piggin
2037afddba49SNick Piggin clear_buffer_new(bh);
2038afddba49SNick Piggin mark_buffer_dirty(bh);
2039afddba49SNick Piggin }
2040afddba49SNick Piggin }
2041afddba49SNick Piggin
2042afddba49SNick Piggin block_start = block_end;
2043afddba49SNick Piggin bh = bh->b_this_page;
2044afddba49SNick Piggin } while (bh != head);
2045afddba49SNick Piggin }
20464a9622f2SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_zero_new_buffers);
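/*
 * Illustrative sketch (an assumption about a typical caller, not taken
 * from this file): a filesystem's ->write_end() that sees a short copy
 * can use folio_zero_new_buffers() to avoid exposing uninitialised
 * blocks, mirroring what block_write_end() does below:
 *
 *	if (unlikely(copied < len)) {
 *		if (!folio_test_uptodate(folio))
 *			copied = 0;
 *		folio_zero_new_buffers(folio, start + copied, start + len);
 *	}
 */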
2047afddba49SNick Piggin
20484aa8cdd5SChristoph Hellwig static int
2049ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
20506d49cc85SChristoph Hellwig const struct iomap *iomap)
2051ae259a9cSChristoph Hellwig {
205280844194SMatthew Wilcox (Oracle) loff_t offset = (loff_t)block << inode->i_blkbits;
2053ae259a9cSChristoph Hellwig
2054ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev;
2055ae259a9cSChristoph Hellwig
2056ae259a9cSChristoph Hellwig /*
2057ae259a9cSChristoph Hellwig * Block points to offset in file we need to map, iomap contains
2058ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the
2059ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller
2060ae259a9cSChristoph Hellwig * handle it.
2061ae259a9cSChristoph Hellwig */
20624aa8cdd5SChristoph Hellwig if (offset >= iomap->offset + iomap->length)
20634aa8cdd5SChristoph Hellwig return -EIO;
2064ae259a9cSChristoph Hellwig
2065ae259a9cSChristoph Hellwig switch (iomap->type) {
2066ae259a9cSChristoph Hellwig case IOMAP_HOLE:
2067ae259a9cSChristoph Hellwig /*
2068ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF,
2069ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is
2070ae259a9cSChristoph Hellwig * executed if necessary.
2071ae259a9cSChristoph Hellwig */
2072ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) ||
2073ae259a9cSChristoph Hellwig (offset >= i_size_read(inode)))
2074ae259a9cSChristoph Hellwig set_buffer_new(bh);
20754aa8cdd5SChristoph Hellwig return 0;
2076ae259a9cSChristoph Hellwig case IOMAP_DELALLOC:
2077ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) ||
2078ae259a9cSChristoph Hellwig (offset >= i_size_read(inode)))
2079ae259a9cSChristoph Hellwig set_buffer_new(bh);
2080ae259a9cSChristoph Hellwig set_buffer_uptodate(bh);
2081ae259a9cSChristoph Hellwig set_buffer_mapped(bh);
2082ae259a9cSChristoph Hellwig set_buffer_delay(bh);
20834aa8cdd5SChristoph Hellwig return 0;
2084ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN:
2085ae259a9cSChristoph Hellwig /*
20863d7b6b21SAndreas Gruenbacher * For unwritten regions, we always need to ensure that regions
20873d7b6b21SAndreas Gruenbacher * in the block we are not writing to are zeroed. Mark the
20883d7b6b21SAndreas Gruenbacher * buffer as new to ensure this.
2089ae259a9cSChristoph Hellwig */
2090ae259a9cSChristoph Hellwig set_buffer_new(bh);
2091ae259a9cSChristoph Hellwig set_buffer_unwritten(bh);
2092df561f66SGustavo A. R. Silva fallthrough;
2093ae259a9cSChristoph Hellwig case IOMAP_MAPPED:
20943d7b6b21SAndreas Gruenbacher if ((iomap->flags & IOMAP_F_NEW) ||
2095381c0432SChristoph Hellwig offset >= i_size_read(inode)) {
2096381c0432SChristoph Hellwig /*
2097381c0432SChristoph Hellwig * This can happen if truncating the block device races
2098381c0432SChristoph Hellwig * with the check in the caller, because i_size updates
2099381c0432SChristoph Hellwig * on block devices aren't synchronized by i_rwsem the
2100381c0432SChristoph Hellwig * way they are for regular files.
2101381c0432SChristoph Hellwig */
2102381c0432SChristoph Hellwig if (S_ISBLK(inode->i_mode))
2103381c0432SChristoph Hellwig return -EIO;
2104ae259a9cSChristoph Hellwig set_buffer_new(bh);
2105381c0432SChristoph Hellwig }
210619fe5f64SAndreas Gruenbacher bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
210719fe5f64SAndreas Gruenbacher inode->i_blkbits;
2108ae259a9cSChristoph Hellwig set_buffer_mapped(bh);
21094aa8cdd5SChristoph Hellwig return 0;
21104aa8cdd5SChristoph Hellwig default:
21114aa8cdd5SChristoph Hellwig WARN_ON_ONCE(1);
21124aa8cdd5SChristoph Hellwig return -EIO;
2113ae259a9cSChristoph Hellwig }
2114ae259a9cSChristoph Hellwig }
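/*
 * Quick reference for the switch above (a summary only, adding no new
 * semantics):
 *
 *	IOMAP_HOLE	-> new (if !uptodate or past EOF), left unmapped
 *	IOMAP_DELALLOC	-> new (same condition) + uptodate + mapped + delay
 *	IOMAP_UNWRITTEN	-> new + unwritten, then mapped as for IOMAP_MAPPED
 *	IOMAP_MAPPED	-> mapped, b_blocknr computed from iomap->addr;
 *			   also new if IOMAP_F_NEW or the block is past EOF
 */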
2115ae259a9cSChristoph Hellwig
2116d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
21176d49cc85SChristoph Hellwig get_block_t *get_block, const struct iomap *iomap)
21181da177e4SLinus Torvalds {
2119b0619401SMatthew Wilcox (Oracle) size_t from = offset_in_folio(folio, pos);
2120b0619401SMatthew Wilcox (Oracle) size_t to = from + len;
2121d1bd0b4eSMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
2122b0619401SMatthew Wilcox (Oracle) size_t block_start, block_end;
21231da177e4SLinus Torvalds sector_t block;
21241da177e4SLinus Torvalds int err = 0;
2125b0619401SMatthew Wilcox (Oracle) size_t blocksize;
21261da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
21271da177e4SLinus Torvalds
2128d1bd0b4eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
2129b0619401SMatthew Wilcox (Oracle) BUG_ON(to > folio_size(folio));
21301da177e4SLinus Torvalds BUG_ON(from > to);
21311da177e4SLinus Torvalds
2132c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0);
213345bce8f3SLinus Torvalds blocksize = head->b_size;
2134b0619401SMatthew Wilcox (Oracle) block = div_u64(folio_pos(folio), blocksize);
21351da177e4SLinus Torvalds
21361da177e4SLinus Torvalds for (bh = head, block_start = 0; bh != head || !block_start;
21371da177e4SLinus Torvalds block++, block_start = block_end, bh = bh->b_this_page) {
21381da177e4SLinus Torvalds block_end = block_start + blocksize;
21391da177e4SLinus Torvalds if (block_end <= from || block_start >= to) {
2140d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
21411da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21421da177e4SLinus Torvalds set_buffer_uptodate(bh);
21431da177e4SLinus Torvalds }
21441da177e4SLinus Torvalds continue;
21451da177e4SLinus Torvalds }
21461da177e4SLinus Torvalds if (buffer_new(bh))
21471da177e4SLinus Torvalds clear_buffer_new(bh);
21481da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2149b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
21504aa8cdd5SChristoph Hellwig if (get_block)
21511da177e4SLinus Torvalds err = get_block(inode, block, bh, 1);
21524aa8cdd5SChristoph Hellwig else
21534aa8cdd5SChristoph Hellwig err = iomap_to_bh(inode, block, bh, iomap);
21541da177e4SLinus Torvalds if (err)
2155f3ddbdc6SNick Piggin break;
2156ae259a9cSChristoph Hellwig
21571da177e4SLinus Torvalds if (buffer_new(bh)) {
2158e64855c6SJan Kara clean_bdev_bh_alias(bh);
2159d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
2160637aff46SNick Piggin clear_buffer_new(bh);
21611da177e4SLinus Torvalds set_buffer_uptodate(bh);
2162637aff46SNick Piggin mark_buffer_dirty(bh);
21631da177e4SLinus Torvalds continue;
21641da177e4SLinus Torvalds }
2165eebd2aa3SChristoph Lameter if (block_end > to || block_start < from)
2166d1bd0b4eSMatthew Wilcox (Oracle) folio_zero_segments(folio,
2167eebd2aa3SChristoph Lameter to, block_end,
2168eebd2aa3SChristoph Lameter block_start, from);
21691da177e4SLinus Torvalds continue;
21701da177e4SLinus Torvalds }
21711da177e4SLinus Torvalds }
2172d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
21731da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21741da177e4SLinus Torvalds set_buffer_uptodate(bh);
21751da177e4SLinus Torvalds continue;
21761da177e4SLinus Torvalds }
21771da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
217833a266ddSDavid Chinner !buffer_unwritten(bh) &&
21791da177e4SLinus Torvalds (block_start < from || block_end > to)) {
2180e7ea1129SZhang Yi bh_read_nowait(bh, 0);
21811da177e4SLinus Torvalds *wait_bh++ = bh;
21821da177e4SLinus Torvalds }
21831da177e4SLinus Torvalds }
21841da177e4SLinus Torvalds /*
21851da177e4SLinus Torvalds * If we issued read requests - let them complete.
21861da177e4SLinus Torvalds */
21871da177e4SLinus Torvalds while (wait_bh > wait) {
21881da177e4SLinus Torvalds wait_on_buffer(*--wait_bh);
21891da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh))
2190f3ddbdc6SNick Piggin err = -EIO;
21911da177e4SLinus Torvalds }
2192f9f07b6cSJan Kara if (unlikely(err))
21934a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, from, to);
21941da177e4SLinus Torvalds return err;
21951da177e4SLinus Torvalds }
2196ae259a9cSChristoph Hellwig
21979f04609fSMatthew Wilcox (Oracle) int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2198ae259a9cSChristoph Hellwig get_block_t *get_block)
2199ae259a9cSChristoph Hellwig {
22009f04609fSMatthew Wilcox (Oracle) return __block_write_begin_int(folio, pos, len, get_block, NULL);
2201ae259a9cSChristoph Hellwig }
2202ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
22031da177e4SLinus Torvalds
220463a23847SMatthew Wilcox (Oracle) void block_commit_write(struct folio *folio, size_t from, size_t to)
22051da177e4SLinus Torvalds {
22068c6cb3e3SMatthew Wilcox (Oracle) size_t block_start, block_end;
22078c6cb3e3SMatthew Wilcox (Oracle) bool partial = false;
22081da177e4SLinus Torvalds unsigned blocksize;
22091da177e4SLinus Torvalds struct buffer_head *bh, *head;
22101da177e4SLinus Torvalds
22118c6cb3e3SMatthew Wilcox (Oracle) bh = head = folio_buffers(folio);
221283f4414bSWojciech Gładysz if (!bh)
221383f4414bSWojciech Gładysz return;
221445bce8f3SLinus Torvalds blocksize = bh->b_size;
22151da177e4SLinus Torvalds
221645bce8f3SLinus Torvalds block_start = 0;
221745bce8f3SLinus Torvalds do {
22181da177e4SLinus Torvalds block_end = block_start + blocksize;
22191da177e4SLinus Torvalds if (block_end <= from || block_start >= to) {
22201da177e4SLinus Torvalds if (!buffer_uptodate(bh))
22218c6cb3e3SMatthew Wilcox (Oracle) partial = true;
22221da177e4SLinus Torvalds } else {
22231da177e4SLinus Torvalds set_buffer_uptodate(bh);
22241da177e4SLinus Torvalds mark_buffer_dirty(bh);
22251da177e4SLinus Torvalds }
22264ebd3aecSYang Guo if (buffer_new(bh))
2227afddba49SNick Piggin clear_buffer_new(bh);
222845bce8f3SLinus Torvalds
222945bce8f3SLinus Torvalds block_start = block_end;
223045bce8f3SLinus Torvalds bh = bh->b_this_page;
223145bce8f3SLinus Torvalds } while (bh != head);
22321da177e4SLinus Torvalds
22331da177e4SLinus Torvalds /*
22341da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers
22352c69e205SMatthew Wilcox (Oracle) * uptodate then we can optimize away a bogus read_folio() for
22368c6cb3e3SMatthew Wilcox (Oracle) * the next read(). Here we 'discover' whether the folio went
22371da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write.
22381da177e4SLinus Torvalds */
22391da177e4SLinus Torvalds if (!partial)
22408c6cb3e3SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
22411da177e4SLinus Torvalds }
224263a23847SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_commit_write);
22431da177e4SLinus Torvalds
22441da177e4SLinus Torvalds /*
2245155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and
2246155130a4SChristoph Hellwig * bringing partial write blocks uptodate first.
2247155130a4SChristoph Hellwig *
22487bb46a67S[email protected] * The filesystem needs to handle block truncation upon failure.
2249afddba49SNick Piggin */
2250155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
22511da86618SMatthew Wilcox (Oracle) struct folio **foliop, get_block_t *get_block)
2252afddba49SNick Piggin {
225309cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT;
22548eb835a1SMatthew Wilcox (Oracle) struct folio *folio;
22556e1db88dSChristoph Hellwig int status;
2256afddba49SNick Piggin
22578eb835a1SMatthew Wilcox (Oracle) folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
22588eb835a1SMatthew Wilcox (Oracle) mapping_gfp_mask(mapping));
22598eb835a1SMatthew Wilcox (Oracle) if (IS_ERR(folio))
22608eb835a1SMatthew Wilcox (Oracle) return PTR_ERR(folio);
2261afddba49SNick Piggin
22628eb835a1SMatthew Wilcox (Oracle) status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2263afddba49SNick Piggin if (unlikely(status)) {
22648eb835a1SMatthew Wilcox (Oracle) folio_unlock(folio);
22658eb835a1SMatthew Wilcox (Oracle) folio_put(folio);
22668eb835a1SMatthew Wilcox (Oracle) folio = NULL;
2267afddba49SNick Piggin }
2268afddba49SNick Piggin
22691da86618SMatthew Wilcox (Oracle) *foliop = folio;
2270afddba49SNick Piggin return status;
2271afddba49SNick Piggin }
2272afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
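/*
 * Illustrative sketch, assuming a hypothetical filesystem "myfs" with a
 * myfs_get_block() helper: a minimal ->write_begin() can simply forward
 * here and pair with generic_write_end() as its ->write_end():
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, foliop,
 *					 myfs_get_block);
 *	}
 */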
2273afddba49SNick Piggin
2274afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2275afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied,
227697edbc02SMatthew Wilcox (Oracle) struct folio *folio, void *fsdata)
2277afddba49SNick Piggin {
22788c6cb3e3SMatthew Wilcox (Oracle) size_t start = pos - folio_pos(folio);
2279afddba49SNick Piggin
2280afddba49SNick Piggin if (unlikely(copied < len)) {
2281afddba49SNick Piggin /*
22822c69e205SMatthew Wilcox (Oracle) * The buffers that were written will now be uptodate, so
22832c69e205SMatthew Wilcox (Oracle) * we don't have to worry about a read_folio reading them
22842c69e205SMatthew Wilcox (Oracle) * and overwriting a partial write. However if we have
22852c69e205SMatthew Wilcox (Oracle) * encountered a short write and only partially written
22862c69e205SMatthew Wilcox (Oracle) * into a buffer, it will not be marked uptodate, so a
22872c69e205SMatthew Wilcox (Oracle) * read_folio might come in and destroy our partial write.
2288afddba49SNick Piggin *
2289afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a
22908c6cb3e3SMatthew Wilcox (Oracle) * non uptodate folio as a zero-length write, and force the
2291afddba49SNick Piggin * caller to redo the whole thing.
2292afddba49SNick Piggin */
22938c6cb3e3SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
2294afddba49SNick Piggin copied = 0;
2295afddba49SNick Piggin
22964a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, start+copied, start+len);
2297afddba49SNick Piggin }
22988c6cb3e3SMatthew Wilcox (Oracle) flush_dcache_folio(folio);
2299afddba49SNick Piggin
2300afddba49SNick Piggin /* This could be a short (even 0-length) commit */
230163a23847SMatthew Wilcox (Oracle) block_commit_write(folio, start, start + copied);
2302afddba49SNick Piggin
2303afddba49SNick Piggin return copied;
2304afddba49SNick Piggin }
2305afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2306afddba49SNick Piggin
2307afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2308afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied,
2309a225800fSMatthew Wilcox (Oracle) struct folio *folio, void *fsdata)
2310afddba49SNick Piggin {
23118af54f29SChristoph Hellwig struct inode *inode = mapping->host;
23128af54f29SChristoph Hellwig loff_t old_size = inode->i_size;
23138af54f29SChristoph Hellwig bool i_size_changed = false;
23148af54f29SChristoph Hellwig
231597edbc02SMatthew Wilcox (Oracle) copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
23168af54f29SChristoph Hellwig
23178af54f29SChristoph Hellwig /*
23188af54f29SChristoph Hellwig * No need to use i_size_read() here, the i_size cannot change under us
23198af54f29SChristoph Hellwig * because we hold i_rwsem.
23208af54f29SChristoph Hellwig *
2321696876d0SMatthew Wilcox (Oracle) * But it's important to update i_size while still holding folio lock:
23228af54f29SChristoph Hellwig * page writeout could otherwise come in and zero beyond i_size.
23238af54f29SChristoph Hellwig */
23248af54f29SChristoph Hellwig if (pos + copied > inode->i_size) {
23258af54f29SChristoph Hellwig i_size_write(inode, pos + copied);
23268af54f29SChristoph Hellwig i_size_changed = true;
23278af54f29SChristoph Hellwig }
23288af54f29SChristoph Hellwig
2329696876d0SMatthew Wilcox (Oracle) folio_unlock(folio);
2330696876d0SMatthew Wilcox (Oracle) folio_put(folio);
23318af54f29SChristoph Hellwig
23328af54f29SChristoph Hellwig if (old_size < pos)
23338af54f29SChristoph Hellwig pagecache_isize_extended(inode, old_size, pos);
23348af54f29SChristoph Hellwig /*
23358af54f29SChristoph Hellwig * Don't mark the inode dirty under page lock. First, it unnecessarily
23368af54f29SChristoph Hellwig * makes the holding time of page lock longer. Second, it forces lock
23378af54f29SChristoph Hellwig * ordering of page lock and transaction start for journaling
23388af54f29SChristoph Hellwig * filesystems.
23398af54f29SChristoph Hellwig */
23408af54f29SChristoph Hellwig if (i_size_changed)
23418af54f29SChristoph Hellwig mark_inode_dirty(inode);
234226ddb1f4SAndreas Gruenbacher return copied;
2343afddba49SNick Piggin }
2344afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2345afddba49SNick Piggin
2346afddba49SNick Piggin /*
23472e7e80f7SMatthew Wilcox (Oracle) * block_is_partially_uptodate checks whether buffers within a folio are
23488ab22b9aSHisashi Hifumi * uptodate or not.
23498ab22b9aSHisashi Hifumi *
23502e7e80f7SMatthew Wilcox (Oracle) * Returns true if all buffers which correspond to the specified part
23512e7e80f7SMatthew Wilcox (Oracle) * of the folio are uptodate.
23528ab22b9aSHisashi Hifumi */
23532e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
23548ab22b9aSHisashi Hifumi {
23558ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize;
23568ab22b9aSHisashi Hifumi unsigned to;
23578ab22b9aSHisashi Hifumi struct buffer_head *bh, *head;
23582e7e80f7SMatthew Wilcox (Oracle) bool ret = true;
23598ab22b9aSHisashi Hifumi
23602e7e80f7SMatthew Wilcox (Oracle) head = folio_buffers(folio);
23612e7e80f7SMatthew Wilcox (Oracle) if (!head)
23622e7e80f7SMatthew Wilcox (Oracle) return false;
236345bce8f3SLinus Torvalds blocksize = head->b_size;
23642e7e80f7SMatthew Wilcox (Oracle) to = min_t(unsigned, folio_size(folio) - from, count);
23658ab22b9aSHisashi Hifumi to = from + to;
23662e7e80f7SMatthew Wilcox (Oracle) if (from < blocksize && to > folio_size(folio) - blocksize)
23672e7e80f7SMatthew Wilcox (Oracle) return false;
23688ab22b9aSHisashi Hifumi
23698ab22b9aSHisashi Hifumi bh = head;
23708ab22b9aSHisashi Hifumi block_start = 0;
23718ab22b9aSHisashi Hifumi do {
23728ab22b9aSHisashi Hifumi block_end = block_start + blocksize;
23738ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) {
23748ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) {
23752e7e80f7SMatthew Wilcox (Oracle) ret = false;
23768ab22b9aSHisashi Hifumi break;
23778ab22b9aSHisashi Hifumi }
23788ab22b9aSHisashi Hifumi if (block_end >= to)
23798ab22b9aSHisashi Hifumi break;
23808ab22b9aSHisashi Hifumi }
23818ab22b9aSHisashi Hifumi block_start = block_end;
23828ab22b9aSHisashi Hifumi bh = bh->b_this_page;
23838ab22b9aSHisashi Hifumi } while (bh != head);
23848ab22b9aSHisashi Hifumi
23858ab22b9aSHisashi Hifumi return ret;
23868ab22b9aSHisashi Hifumi }
23878ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
23888ab22b9aSHisashi Hifumi
23898ab22b9aSHisashi Hifumi /*
23902c69e205SMatthew Wilcox (Oracle) * Generic "read_folio" function for block devices that have the normal
23911da177e4SLinus Torvalds * get_block functionality. This covers most of the block device filesystems.
23922c69e205SMatthew Wilcox (Oracle) * Reads the folio asynchronously --- the unlock_buffer() and
23931da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the
23942c69e205SMatthew Wilcox (Oracle) * folio once IO has completed.
23951da177e4SLinus Torvalds */
23962c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23971da177e4SLinus Torvalds {
23982c69e205SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
23991da177e4SLinus Torvalds sector_t iblock, lblock;
2400b72e591fSMatthew Wilcox struct buffer_head *bh, *head, *prev = NULL;
2401fa399c31SMatthew Wilcox (Oracle) size_t blocksize;
24021da177e4SLinus Torvalds int fully_mapped = 1;
2403b7a6eb22SMatthew Wilcox (Oracle) bool page_error = false;
24044fa512ceSEric Biggers loff_t limit = i_size_read(inode);
24054fa512ceSEric Biggers
24064fa512ceSEric Biggers /* This is needed for ext4. */
24074fa512ceSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
24084fa512ceSEric Biggers limit = inode->i_sb->s_maxbytes;
24091da177e4SLinus Torvalds
2410c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0);
241145bce8f3SLinus Torvalds blocksize = head->b_size;
24121da177e4SLinus Torvalds
2413fa399c31SMatthew Wilcox (Oracle) iblock = div_u64(folio_pos(folio), blocksize);
2414fa399c31SMatthew Wilcox (Oracle) lblock = div_u64(limit + blocksize - 1, blocksize);
24151da177e4SLinus Torvalds bh = head;
24161da177e4SLinus Torvalds
24171da177e4SLinus Torvalds do {
24181da177e4SLinus Torvalds if (buffer_uptodate(bh))
24191da177e4SLinus Torvalds continue;
24201da177e4SLinus Torvalds
24211da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2422c64610baSAndrew Morton int err = 0;
2423c64610baSAndrew Morton
24241da177e4SLinus Torvalds fully_mapped = 0;
24251da177e4SLinus Torvalds if (iblock < lblock) {
2426b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
2427c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0);
24287ad635eaSMatthew Wilcox (Oracle) if (err)
2429b7a6eb22SMatthew Wilcox (Oracle) page_error = true;
2430b7a6eb22SMatthew Wilcox (Oracle) }
24311da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2432753aadebSLuis Chamberlain folio_zero_range(folio, bh_offset(bh),
24332c69e205SMatthew Wilcox (Oracle) blocksize);
2434c64610baSAndrew Morton if (!err)
24351da177e4SLinus Torvalds set_buffer_uptodate(bh);
24361da177e4SLinus Torvalds continue;
24371da177e4SLinus Torvalds }
24381da177e4SLinus Torvalds /*
24391da177e4SLinus Torvalds * get_block() might have updated the buffer
24401da177e4SLinus Torvalds * synchronously
24411da177e4SLinus Torvalds */
24421da177e4SLinus Torvalds if (buffer_uptodate(bh))
24431da177e4SLinus Torvalds continue;
24441da177e4SLinus Torvalds }
2445b72e591fSMatthew Wilcox
2446b72e591fSMatthew Wilcox lock_buffer(bh);
2447b72e591fSMatthew Wilcox if (buffer_uptodate(bh)) {
2448b72e591fSMatthew Wilcox unlock_buffer(bh);
2449b72e591fSMatthew Wilcox continue;
2450b72e591fSMatthew Wilcox }
2451b72e591fSMatthew Wilcox
2452b72e591fSMatthew Wilcox mark_buffer_async_read(bh);
2453b72e591fSMatthew Wilcox if (prev)
2454b72e591fSMatthew Wilcox submit_bh(REQ_OP_READ, prev);
2455b72e591fSMatthew Wilcox prev = bh;
2456753aadebSLuis Chamberlain } while (iblock++, (bh = bh->b_this_page) != head);
24571da177e4SLinus Torvalds
24581da177e4SLinus Torvalds if (fully_mapped)
24592c69e205SMatthew Wilcox (Oracle) folio_set_mappedtodisk(folio);
24601da177e4SLinus Torvalds
24611da177e4SLinus Torvalds /*
2462b72e591fSMatthew Wilcox * All buffers are uptodate or get_block() returned an error
2463b72e591fSMatthew Wilcox * when trying to map them - we must finish the read because
2464b72e591fSMatthew Wilcox * end_buffer_async_read() will never be called on any buffer
2465b72e591fSMatthew Wilcox * in this folio.
24661da177e4SLinus Torvalds */
2467b72e591fSMatthew Wilcox if (prev)
2468b72e591fSMatthew Wilcox submit_bh(REQ_OP_READ, prev);
24691da177e4SLinus Torvalds else
2470b72e591fSMatthew Wilcox folio_end_read(folio, !page_error);
2471b72e591fSMatthew Wilcox
24721da177e4SLinus Torvalds return 0;
24731da177e4SLinus Torvalds }
24742c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
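/*
 * Illustrative sketch of how the helpers in this file are commonly wired
 * into an address_space_operations table (the "myfs" names are
 * hypothetical; block_dirty_folio() and block_invalidate_folio() are
 * defined earlier in this file):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */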
24751da177e4SLinus Torvalds
24761da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
247789e10787SNick Piggin * truncates. Uses filesystem pagecache writes to allow the filesystem to
24781da177e4SLinus Torvalds * deal with the hole.
24791da177e4SLinus Torvalds */
248089e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
24811da177e4SLinus Torvalds {
24821da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
248353b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops;
24841da86618SMatthew Wilcox (Oracle) struct folio *folio;
24851468c6f4SAlexander Potapenko void *fsdata = NULL;
24861da177e4SLinus Torvalds int err;
24871da177e4SLinus Torvalds
2488c08d3b0eS[email protected] err = inode_newsize_ok(inode, size);
2489c08d3b0eS[email protected] if (err)
24901da177e4SLinus Torvalds goto out;
24911da177e4SLinus Torvalds
24921da86618SMatthew Wilcox (Oracle) err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
249389e10787SNick Piggin if (err)
249405eb0b51SOGAWA Hirofumi goto out;
249505eb0b51SOGAWA Hirofumi
24961da86618SMatthew Wilcox (Oracle) err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
249789e10787SNick Piggin BUG_ON(err > 0);
249805eb0b51SOGAWA Hirofumi
249905eb0b51SOGAWA Hirofumi out:
250005eb0b51SOGAWA Hirofumi return err;
250105eb0b51SOGAWA Hirofumi }
25021fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
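/*
 * Illustrative sketch (hypothetical myfs_setattr()): an expanding
 * truncate typically calls this helper before committing the new size:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */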
250305eb0b51SOGAWA Hirofumi
2504f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
250589e10787SNick Piggin loff_t pos, loff_t *bytes)
250605eb0b51SOGAWA Hirofumi {
250789e10787SNick Piggin struct inode *inode = mapping->host;
250853b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops;
250993407472SFabian Frederick unsigned int blocksize = i_blocksize(inode);
25101da86618SMatthew Wilcox (Oracle) struct folio *folio;
25111468c6f4SAlexander Potapenko void *fsdata = NULL;
251289e10787SNick Piggin pgoff_t index, curidx;
251389e10787SNick Piggin loff_t curpos;
251489e10787SNick Piggin unsigned zerofrom, offset, len;
251589e10787SNick Piggin int err = 0;
251605eb0b51SOGAWA Hirofumi
251709cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT;
251809cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK;
251989e10787SNick Piggin
252009cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes) >> PAGE_SHIFT)) {
252109cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK;
252289e10787SNick Piggin if (zerofrom & (blocksize-1)) {
252389e10787SNick Piggin *bytes |= (blocksize-1);
252489e10787SNick Piggin (*bytes)++;
252589e10787SNick Piggin }
252609cbfeafSKirill A. Shutemov len = PAGE_SIZE - zerofrom;
252789e10787SNick Piggin
252853b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len,
25291da86618SMatthew Wilcox (Oracle) &folio, &fsdata);
253089e10787SNick Piggin if (err)
253189e10787SNick Piggin goto out;
25321da86618SMatthew Wilcox (Oracle) folio_zero_range(folio, offset_in_folio(folio, curpos), len);
253353b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len,
25341da86618SMatthew Wilcox (Oracle) folio, fsdata);
253589e10787SNick Piggin if (err < 0)
253689e10787SNick Piggin goto out;
253789e10787SNick Piggin BUG_ON(err != len);
253889e10787SNick Piggin err = 0;
2539061e9746SOGAWA Hirofumi
2540061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping);
2541c2ca0fcdSMikulas Patocka
254208d405c8SDavidlohr Bueso if (fatal_signal_pending(current)) {
2543c2ca0fcdSMikulas Patocka err = -EINTR;
2544c2ca0fcdSMikulas Patocka goto out;
2545c2ca0fcdSMikulas Patocka }
254689e10787SNick Piggin }
254789e10787SNick Piggin
254889e10787SNick Piggin /* page covers the boundary, find the boundary offset */
254989e10787SNick Piggin if (index == curidx) {
255009cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK;
255189e10787SNick Piggin /* if we're expanding the file, the last block will be filled */
255289e10787SNick Piggin if (offset <= zerofrom) {
255389e10787SNick Piggin goto out;
255489e10787SNick Piggin }
255589e10787SNick Piggin if (zerofrom & (blocksize-1)) {
255689e10787SNick Piggin *bytes |= (blocksize-1);
255789e10787SNick Piggin (*bytes)++;
255889e10787SNick Piggin }
255989e10787SNick Piggin len = offset - zerofrom;
256089e10787SNick Piggin
256153b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len,
25621da86618SMatthew Wilcox (Oracle) &folio, &fsdata);
256389e10787SNick Piggin if (err)
256489e10787SNick Piggin goto out;
25651da86618SMatthew Wilcox (Oracle) folio_zero_range(folio, offset_in_folio(folio, curpos), len);
256653b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len,
25671da86618SMatthew Wilcox (Oracle) folio, fsdata);
256889e10787SNick Piggin if (err < 0)
256989e10787SNick Piggin goto out;
257089e10787SNick Piggin BUG_ON(err != len);
257189e10787SNick Piggin err = 0;
257289e10787SNick Piggin }
257389e10787SNick Piggin out:
257489e10787SNick Piggin return err;
25751da177e4SLinus Torvalds }
25761da177e4SLinus Torvalds
25771da177e4SLinus Torvalds /*
25781da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in a file.
25791da177e4SLinus Torvalds * We may have to extend the file.
25801da177e4SLinus Torvalds */
2581282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2582be3bbbc5SMatthew Wilcox (Oracle) loff_t pos, unsigned len,
25831da86618SMatthew Wilcox (Oracle) struct folio **foliop, void **fsdata,
258489e10787SNick Piggin get_block_t *get_block, loff_t *bytes)
25851da177e4SLinus Torvalds {
25861da177e4SLinus Torvalds struct inode *inode = mapping->host;
258793407472SFabian Frederick unsigned int blocksize = i_blocksize(inode);
258893407472SFabian Frederick unsigned int zerofrom;
258989e10787SNick Piggin int err;
25901da177e4SLinus Torvalds
259189e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes);
259289e10787SNick Piggin if (err)
2593155130a4SChristoph Hellwig return err;
25941da177e4SLinus Torvalds
259509cbfeafSKirill A. Shutemov zerofrom = *bytes & ~PAGE_MASK;
259689e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) {
25971da177e4SLinus Torvalds *bytes |= (blocksize-1);
25981da177e4SLinus Torvalds (*bytes)++;
25991da177e4SLinus Torvalds }
26001da177e4SLinus Torvalds
26011da86618SMatthew Wilcox (Oracle) return block_write_begin(mapping, pos, len, foliop, get_block);
26021da177e4SLinus Torvalds }
26031fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
26041da177e4SLinus Torvalds
260554171690SDavid Chinner /*
260654171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets
260754171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must
260854171690SDavid Chinner * be careful to check for EOF conditions here. We set the page up correctly
260954171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into
261054171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that
261154171690SDavid Chinner * support these features.
261254171690SDavid Chinner *
261354171690SDavid Chinner * We are not allowed to take i_rwsem here so we have to play games to
261454171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because
26157bb46a67S[email protected] * truncate writes the inode size before removing pages, once we have the
261654171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not
261754171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we
261854171690SDavid Chinner * unlock the page.
2619ea13a864SJan Kara *
262014da9200SJan Kara * Direct callers of this function should protect against filesystem freezing
26215c500029SRoss Zwisler * using sb_start_pagefault() - sb_end_pagefault() functions.
262254171690SDavid Chinner */
26235c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
262454171690SDavid Chinner get_block_t get_block)
262554171690SDavid Chinner {
2626fe181377SMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page);
2627496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file);
262854171690SDavid Chinner unsigned long end;
262954171690SDavid Chinner loff_t size;
263024da4fabSJan Kara int ret;
263154171690SDavid Chinner
2632fe181377SMatthew Wilcox (Oracle) folio_lock(folio);
263354171690SDavid Chinner size = i_size_read(inode);
2634fe181377SMatthew Wilcox (Oracle) if ((folio->mapping != inode->i_mapping) ||
2635fe181377SMatthew Wilcox (Oracle) (folio_pos(folio) >= size)) {
263624da4fabSJan Kara /* We overload EFAULT to mean page got truncated */
263724da4fabSJan Kara ret = -EFAULT;
263824da4fabSJan Kara goto out_unlock;
263954171690SDavid Chinner }
264054171690SDavid Chinner
2641fe181377SMatthew Wilcox (Oracle) end = folio_size(folio);
2642fe181377SMatthew Wilcox (Oracle) /* folio is wholly or partially inside EOF */
2643fe181377SMatthew Wilcox (Oracle) if (folio_pos(folio) + end > size)
2644fe181377SMatthew Wilcox (Oracle) end = size - folio_pos(folio);
264554171690SDavid Chinner
2646fe181377SMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2647a524fcfeSBean Huo if (unlikely(ret))
264824da4fabSJan Kara goto out_unlock;
2649a524fcfeSBean Huo
265063a23847SMatthew Wilcox (Oracle) block_commit_write(folio, 0, end);
2651a524fcfeSBean Huo
2652fe181377SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
2653fe181377SMatthew Wilcox (Oracle) folio_wait_stable(folio);
265424da4fabSJan Kara return 0;
265524da4fabSJan Kara out_unlock:
2656fe181377SMatthew Wilcox (Oracle) folio_unlock(folio);
265754171690SDavid Chinner return ret;
265854171690SDavid Chinner }
26591fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
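/*
 * Illustrative sketch of the freeze protection described above, for a
 * hypothetical filesystem "myfs" (the error mapping is an assumption
 * based on the -EFAULT convention noted in the function):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		if (err == -EFAULT)	/* folio was truncated under us */
 *			return VM_FAULT_NOPAGE;
 *		if (err < 0)
 *			return vmf_error(err);
 *		return VM_FAULT_LOCKED;
 *	}
 */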
26601da177e4SLinus Torvalds
26611da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
26621da177e4SLinus Torvalds loff_t from, get_block_t *get_block)
26631da177e4SLinus Torvalds {
266409cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT;
26651da177e4SLinus Torvalds unsigned blocksize;
266654b21a79SAndrew Morton sector_t iblock;
26676d68f644SMatthew Wilcox (Oracle) size_t offset, length, pos;
26681da177e4SLinus Torvalds struct inode *inode = mapping->host;
26696d68f644SMatthew Wilcox (Oracle) struct folio *folio;
26701da177e4SLinus Torvalds struct buffer_head *bh;
2671dc7cb2d2SJiapeng Chong int err = 0;
26721da177e4SLinus Torvalds
267393407472SFabian Frederick blocksize = i_blocksize(inode);
26746d68f644SMatthew Wilcox (Oracle) length = from & (blocksize - 1);
26751da177e4SLinus Torvalds
26761da177e4SLinus Torvalds /* Block boundary? Nothing to do */
26771da177e4SLinus Torvalds if (!length)
26781da177e4SLinus Torvalds return 0;
26791da177e4SLinus Torvalds
26801da177e4SLinus Torvalds length = blocksize - length;
26814b04646cSMatthew Wilcox (Oracle) iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
26821da177e4SLinus Torvalds
26836d68f644SMatthew Wilcox (Oracle) folio = filemap_grab_folio(mapping, index);
26846d68f644SMatthew Wilcox (Oracle) if (IS_ERR(folio))
26856d68f644SMatthew Wilcox (Oracle) return PTR_ERR(folio);
26861da177e4SLinus Torvalds
26876d68f644SMatthew Wilcox (Oracle) bh = folio_buffers(folio);
26883decb856SMatthew Wilcox (Oracle) if (!bh)
26890a88810dSMatthew Wilcox (Oracle) bh = create_empty_buffers(folio, blocksize, 0);
26901da177e4SLinus Torvalds
26911da177e4SLinus Torvalds /* Find the buffer that contains "offset" */
26926d68f644SMatthew Wilcox (Oracle) offset = offset_in_folio(folio, from);
26931da177e4SLinus Torvalds pos = blocksize;
26941da177e4SLinus Torvalds while (offset >= pos) {
26951da177e4SLinus Torvalds bh = bh->b_this_page;
26961da177e4SLinus Torvalds iblock++;
26971da177e4SLinus Torvalds pos += blocksize;
26981da177e4SLinus Torvalds }
26991da177e4SLinus Torvalds
27001da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2701b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
27021da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0);
27031da177e4SLinus Torvalds if (err)
27041da177e4SLinus Torvalds goto unlock;
27051da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */
27061da177e4SLinus Torvalds if (!buffer_mapped(bh))
27071da177e4SLinus Torvalds goto unlock;
27081da177e4SLinus Torvalds }
27091da177e4SLinus Torvalds
27101da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */
27116d68f644SMatthew Wilcox (Oracle) if (folio_test_uptodate(folio))
27121da177e4SLinus Torvalds set_buffer_uptodate(bh);
27131da177e4SLinus Torvalds
271433a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2715e7ea1129SZhang Yi err = bh_read(bh, 0);
27161da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */
2717e7ea1129SZhang Yi if (err < 0)
27181da177e4SLinus Torvalds goto unlock;
27191da177e4SLinus Torvalds }
27201da177e4SLinus Torvalds
27216d68f644SMatthew Wilcox (Oracle) folio_zero_range(folio, offset, length);
27221da177e4SLinus Torvalds mark_buffer_dirty(bh);
27231da177e4SLinus Torvalds
27241da177e4SLinus Torvalds unlock:
27256d68f644SMatthew Wilcox (Oracle) folio_unlock(folio);
27266d68f644SMatthew Wilcox (Oracle) folio_put(folio);
2727dc7cb2d2SJiapeng Chong
27281da177e4SLinus Torvalds return err;
27291da177e4SLinus Torvalds }
27301fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
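/*
 * Illustrative sketch (hypothetical "myfs"): a shrinking truncate uses
 * this to zero the tail of the new final block so stale data beyond the
 * new EOF is not exposed:
 *
 *	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
 *	if (err)
 *		return err;
 *	truncate_setsize(inode, newsize);
 */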
27311da177e4SLinus Torvalds
27321da177e4SLinus Torvalds /*
27331da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces
27341da177e4SLinus Torvalds */
273517bf23a9SMatthew Wilcox (Oracle) int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
273617bf23a9SMatthew Wilcox (Oracle) void *get_block)
27371da177e4SLinus Torvalds {
2738bb0ea598SMatthew Wilcox (Oracle) struct inode * const inode = folio->mapping->host;
27391da177e4SLinus Torvalds loff_t i_size = i_size_read(inode);
27401da177e4SLinus Torvalds
2741bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully inside i_size? */
2742bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) + folio_size(folio) <= i_size)
274314059f66SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc);
27441da177e4SLinus Torvalds
2745bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully outside i_size? (truncate in progress) */
2746bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) >= i_size) {
274753418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
27481da177e4SLinus Torvalds return 0; /* don't care */
27491da177e4SLinus Torvalds }
27501da177e4SLinus Torvalds
27511da177e4SLinus Torvalds /*
2752bb0ea598SMatthew Wilcox (Oracle) * The folio straddles i_size. It must be zeroed out on each and every
27532a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped
27541da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of
27551da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and
27561da177e4SLinus Torvalds * writes to that region are not written out to the file."
27571da177e4SLinus Torvalds */
2758bb0ea598SMatthew Wilcox (Oracle) folio_zero_segment(folio, offset_in_folio(folio, i_size),
2759bb0ea598SMatthew Wilcox (Oracle) folio_size(folio));
276014059f66SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc);
276135c80d5fSChris Mason }
276235c80d5fSChris Mason
27631da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
27641da177e4SLinus Torvalds get_block_t *get_block)
27651da177e4SLinus Torvalds {
27661da177e4SLinus Torvalds struct inode *inode = mapping->host;
27672a527d68SAlexander Potapenko struct buffer_head tmp = {
27682a527d68SAlexander Potapenko .b_size = i_blocksize(inode),
27692a527d68SAlexander Potapenko };
27702a527d68SAlexander Potapenko
27711da177e4SLinus Torvalds get_block(inode, block, &tmp, 0);
27721da177e4SLinus Torvalds return tmp.b_blocknr;
27731da177e4SLinus Torvalds }
27741fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
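/*
 * Illustrative sketch: wiring generic_block_bmap() into ->bmap for a
 * hypothetical "myfs":
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */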
27751da177e4SLinus Torvalds
27764246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
27771da177e4SLinus Torvalds {
27781da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private;
27791da177e4SLinus Torvalds
2780b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET)))
278108bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state);
278208bafc03SKeith Mannthey
27834e4cbee9SChristoph Hellwig bh->b_end_io(bh, !bio->bi_status);
27841da177e4SLinus Torvalds bio_put(bio);
27851da177e4SLinus Torvalds }
27861da177e4SLinus Torvalds
27875bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
278844981351SBart Van Assche enum rw_hint write_hint,
27891420c4a5SBart Van Assche struct writeback_control *wbc)
27901da177e4SLinus Torvalds {
27911420c4a5SBart Van Assche const enum req_op op = opf & REQ_OP_MASK;
27921da177e4SLinus Torvalds struct bio *bio;
27931da177e4SLinus Torvalds
27941da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh));
27951da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh));
27961da177e4SLinus Torvalds BUG_ON(!bh->b_end_io);
27978fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh));
27988fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh));
27991da177e4SLinus Torvalds
280048fd4f93SJens Axboe /*
280148fd4f93SJens Axboe * Only clear out a write error when rewriting
28021da177e4SLinus Torvalds */
28032a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
28041da177e4SLinus Torvalds clear_buffer_write_io_error(bh);
28051da177e4SLinus Torvalds
280607888c66SChristoph Hellwig if (buffer_meta(bh))
28071420c4a5SBart Van Assche opf |= REQ_META;
280807888c66SChristoph Hellwig if (buffer_prio(bh))
28091420c4a5SBart Van Assche opf |= REQ_PRIO;
281007888c66SChristoph Hellwig
28111420c4a5SBart Van Assche bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
28121da177e4SLinus Torvalds
28134f74d15fSEric Biggers fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
28144f74d15fSEric Biggers
28154f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
281644981351SBart Van Assche bio->bi_write_hint = write_hint;
28171da177e4SLinus Torvalds
281830dac24eSPankaj Raghav bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
28191da177e4SLinus Torvalds
28201da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync;
28211da177e4SLinus Torvalds bio->bi_private = bh;
28221da177e4SLinus Torvalds
282383c9c547SMing Lei /* Take care of bh's that straddle the end of the device */
282483c9c547SMing Lei guard_bio_eod(bio);
282583c9c547SMing Lei
2826fd42df30SDennis Zhou if (wbc) {
2827fd42df30SDennis Zhou wbc_init_bio(wbc, bio);
282830dac24eSPankaj Raghav wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2829fd42df30SDennis Zhou }
2830fd42df30SDennis Zhou
28314e49ea4aSMike Christie submit_bio(bio);
28321da177e4SLinus Torvalds }
2833bafc0dbaSTejun Heo
28345bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh)
283571368511SDarrick J. Wong {
283644981351SBart Van Assche submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
283771368511SDarrick J. Wong }
28381fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
28391da177e4SLinus Torvalds
28403ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28419cb569d6SChristoph Hellwig {
28429cb569d6SChristoph Hellwig lock_buffer(bh);
28439cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) {
28449cb569d6SChristoph Hellwig unlock_buffer(bh);
28459cb569d6SChristoph Hellwig return;
28469cb569d6SChristoph Hellwig }
28479cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync;
28489cb569d6SChristoph Hellwig get_bh(bh);
28491420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | op_flags, bh);
28509cb569d6SChristoph Hellwig }
28519cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
28529cb569d6SChristoph Hellwig
28531da177e4SLinus Torvalds /*
28541da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O
28551da177e4SLinus Torvalds * and then start new I/O, and wait upon that in turn. The caller must have a ref on
28561da177e4SLinus Torvalds * the buffer_head.
28571da177e4SLinus Torvalds */
28583ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28591da177e4SLinus Torvalds {
28601da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1);
28611da177e4SLinus Torvalds lock_buffer(bh);
28621da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) {
2863377254b2SXianting Tian /*
2864377254b2SXianting Tian * The bh should be mapped, but it might not be if the
2865377254b2SXianting Tian * device was hot-removed. Not much we can do but fail the I/O.
2866377254b2SXianting Tian */
2867377254b2SXianting Tian if (!buffer_mapped(bh)) {
2868377254b2SXianting Tian unlock_buffer(bh);
2869377254b2SXianting Tian return -EIO;
2870377254b2SXianting Tian }
2871377254b2SXianting Tian
28721da177e4SLinus Torvalds get_bh(bh);
28731da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync;
2874ab620620SRitesh Harjani (IBM) submit_bh(REQ_OP_WRITE | op_flags, bh);
28751da177e4SLinus Torvalds wait_on_buffer(bh);
2876ab620620SRitesh Harjani (IBM) if (!buffer_uptodate(bh))
2877ab620620SRitesh Harjani (IBM) return -EIO;
28781da177e4SLinus Torvalds } else {
28791da177e4SLinus Torvalds unlock_buffer(bh);
28801da177e4SLinus Torvalds }
2881ab620620SRitesh Harjani (IBM) return 0;
28821da177e4SLinus Torvalds }
288387e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
288487e99511SChristoph Hellwig
288587e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
288687e99511SChristoph Hellwig {
288770fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC);
288887e99511SChristoph Hellwig }
28891fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
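/*
 * Illustrative fragment of the classic synchronous metadata update that
 * sync_dirty_buffer() supports (sb, block, offset, src and len are
 * assumed to be provided by the caller):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + offset, src, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */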
28901da177e4SLinus Torvalds
28911da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28921da177e4SLinus Torvalds {
28931da177e4SLinus Torvalds return atomic_read(&bh->b_count) |
28941da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28951da177e4SLinus Torvalds }
28961da177e4SLinus Torvalds
289764394763SMatthew Wilcox (Oracle) static bool
289864394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
28991da177e4SLinus Torvalds {
290064394763SMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio);
29011da177e4SLinus Torvalds struct buffer_head *bh;
29021da177e4SLinus Torvalds
29031da177e4SLinus Torvalds bh = head;
29041da177e4SLinus Torvalds do {
29051da177e4SLinus Torvalds if (buffer_busy(bh))
29061da177e4SLinus Torvalds goto failed;
29071da177e4SLinus Torvalds bh = bh->b_this_page;
29081da177e4SLinus Torvalds } while (bh != head);
29091da177e4SLinus Torvalds
29101da177e4SLinus Torvalds do {
29111da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
29121da177e4SLinus Torvalds
2913535ee2fbSJan Kara if (bh->b_assoc_map)
29141da177e4SLinus Torvalds __remove_assoc_queue(bh);
29151da177e4SLinus Torvalds bh = next;
29161da177e4SLinus Torvalds } while (bh != head);
29171da177e4SLinus Torvalds *buffers_to_free = head;
291864394763SMatthew Wilcox (Oracle) folio_detach_private(folio);
291964394763SMatthew Wilcox (Oracle) return true;
29201da177e4SLinus Torvalds failed:
292164394763SMatthew Wilcox (Oracle) return false;
29221da177e4SLinus Torvalds }
29231da177e4SLinus Torvalds
2924b1888d14SMatthew Wilcox (Oracle) /**
2925b1888d14SMatthew Wilcox (Oracle) * try_to_free_buffers - Release buffers attached to this folio.
2926b1888d14SMatthew Wilcox (Oracle) * @folio: The folio.
2927b1888d14SMatthew Wilcox (Oracle) *
2928b1888d14SMatthew Wilcox (Oracle) * If any buffers are in use (dirty, under writeback, elevated refcount),
2929b1888d14SMatthew Wilcox (Oracle) * no buffers will be freed.
2930b1888d14SMatthew Wilcox (Oracle) *
2931b1888d14SMatthew Wilcox (Oracle) * If the folio is dirty but all the buffers are clean then we need to
2932b1888d14SMatthew Wilcox (Oracle) * be sure to mark the folio clean as well. This is because the folio
2933b1888d14SMatthew Wilcox (Oracle) * may be against a block device, and a later reattachment of buffers
2934b1888d14SMatthew Wilcox (Oracle) * to a dirty folio will set *all* buffers dirty, which would corrupt
2935b1888d14SMatthew Wilcox (Oracle) * filesystem data on the same device.
2936b1888d14SMatthew Wilcox (Oracle) *
2937b1888d14SMatthew Wilcox (Oracle) * The same applies to regular filesystem folios: if all the buffers are
2938b1888d14SMatthew Wilcox (Oracle) * clean then we set the folio clean and proceed. To do that, we require
2939b1888d14SMatthew Wilcox (Oracle) * total exclusion from block_dirty_folio(). That is obtained with
2940b1888d14SMatthew Wilcox (Oracle) * i_private_lock.
2941b1888d14SMatthew Wilcox (Oracle) *
2942b1888d14SMatthew Wilcox (Oracle) * Exclusion against try_to_free_buffers may be obtained by either
2943b1888d14SMatthew Wilcox (Oracle) * locking the folio or by holding its mapping's i_private_lock.
2944b1888d14SMatthew Wilcox (Oracle) *
2945b1888d14SMatthew Wilcox (Oracle) * Context: Process context. @folio must be locked. Will not sleep.
2946b1888d14SMatthew Wilcox (Oracle) * Return: true if all buffers attached to this folio were freed.
2947b1888d14SMatthew Wilcox (Oracle) */
294868189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
29491da177e4SLinus Torvalds {
295068189fefSMatthew Wilcox (Oracle) struct address_space * const mapping = folio->mapping;
29511da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL;
295268189fefSMatthew Wilcox (Oracle) bool ret = false;
29531da177e4SLinus Torvalds
295468189fefSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
295568189fefSMatthew Wilcox (Oracle) if (folio_test_writeback(folio))
295668189fefSMatthew Wilcox (Oracle) return false;
29571da177e4SLinus Torvalds
29581da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? */
295964394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free);
29601da177e4SLinus Torvalds goto out;
29611da177e4SLinus Torvalds }
29621da177e4SLinus Torvalds
2963600f111eSMatthew Wilcox (Oracle) spin_lock(&mapping->i_private_lock);
296464394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free);
2965ecdfc978SLinus Torvalds
2966ecdfc978SLinus Torvalds /*
2967ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3)
296868189fefSMatthew Wilcox (Oracle) * then we can have clean buffers against a dirty folio. We
296968189fefSMatthew Wilcox (Oracle) * clean the folio here; otherwise the VM will never notice
2970ecdfc978SLinus Torvalds * that the filesystem did any IO at all.
2971ecdfc978SLinus Torvalds *
2972ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all
297368189fefSMatthew Wilcox (Oracle) * the folio's buffers clean. We discover that here and clean
297468189fefSMatthew Wilcox (Oracle) * the folio also.
297587df7241SNick Piggin *
2976600f111eSMatthew Wilcox (Oracle) * i_private_lock must be held over this entire operation in order
2977e621900aSMatthew Wilcox (Oracle) * to synchronise against block_dirty_folio and prevent the
297887df7241SNick Piggin * dirty bit from being lost.
2979ecdfc978SLinus Torvalds */
298011f81becSTejun Heo if (ret)
298168189fefSMatthew Wilcox (Oracle) folio_cancel_dirty(folio);
2982600f111eSMatthew Wilcox (Oracle) spin_unlock(&mapping->i_private_lock);
29831da177e4SLinus Torvalds out:
29841da177e4SLinus Torvalds if (buffers_to_free) {
29851da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free;
29861da177e4SLinus Torvalds
29871da177e4SLinus Torvalds do {
29881da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
29891da177e4SLinus Torvalds free_buffer_head(bh);
29901da177e4SLinus Torvalds bh = next;
29911da177e4SLinus Torvalds } while (bh != buffers_to_free);
29921da177e4SLinus Torvalds }
29931da177e4SLinus Torvalds return ret;
29941da177e4SLinus Torvalds }
29951da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
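/*
 * Illustrative sketch: a buffer-backed filesystem's ->release_folio()
 * often just forwards here (hypothetical "myfs"):
 *
 *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(folio);
 *	}
 */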
29961da177e4SLinus Torvalds
29971da177e4SLinus Torvalds /*
29981da177e4SLinus Torvalds * Buffer-head allocation
29991da177e4SLinus Torvalds */
300068279f9cSAlexey Dobriyan static struct kmem_cache *bh_cachep __ro_after_init;
30011da177e4SLinus Torvalds
30021da177e4SLinus Torvalds /*
30031da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start
30041da177e4SLinus Torvalds * stripping them in writeback.
30051da177e4SLinus Torvalds */
300668279f9cSAlexey Dobriyan static unsigned long max_buffer_heads __ro_after_init;
30071da177e4SLinus Torvalds
30081da177e4SLinus Torvalds int buffer_heads_over_limit;
30091da177e4SLinus Torvalds
30101da177e4SLinus Torvalds struct bh_accounting {
30111da177e4SLinus Torvalds int nr; /* Number of live bh's */
30121da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */
30131da177e4SLinus Torvalds };
30141da177e4SLinus Torvalds
30151da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
30161da177e4SLinus Torvalds
30171da177e4SLinus Torvalds static void recalc_bh_state(void)
30181da177e4SLinus Torvalds {
30191da177e4SLinus Torvalds int i;
30201da177e4SLinus Torvalds int tot = 0;
30211da177e4SLinus Torvalds
3022ee1be862SChristoph Lameter if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
30231da177e4SLinus Torvalds return;
3024c7b92516SChristoph Lameter __this_cpu_write(bh_accounting.ratelimit, 0);
30258a143426SEric Dumazet for_each_online_cpu(i)
30261da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr;
30271da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads);
30281da177e4SLinus Torvalds }
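/*
 * Worked example for the ratelimit above (approximate): a CPU doing a
 * steady stream of buffer_head allocations and frees pays for the
 * for_each_online_cpu() sum only once per 4096 local updates; the other
 * 4095 calls touch nothing but this CPU's own bh_accounting.  The flip
 * side is that buffer_heads_over_limit may lag the true total by a few
 * thousand buffer_heads per CPU, which is acceptable for a writeback
 * heuristic.
 */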
30291da177e4SLinus Torvalds
3030dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
30311da177e4SLinus Torvalds {
3032019b4d12SRichard Kennedy struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
30331da177e4SLinus Torvalds if (ret) {
3034a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers);
3035f1e67e35SThomas Gleixner spin_lock_init(&ret->b_uptodate_lock);
3036c7b92516SChristoph Lameter preempt_disable();
3037c7b92516SChristoph Lameter __this_cpu_inc(bh_accounting.nr);
30381da177e4SLinus Torvalds recalc_bh_state();
3039c7b92516SChristoph Lameter preempt_enable();
30401da177e4SLinus Torvalds }
30411da177e4SLinus Torvalds return ret;
30421da177e4SLinus Torvalds }
30431da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
30441da177e4SLinus Torvalds
30451da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
30461da177e4SLinus Torvalds {
30471da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers));
30481da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh);
3049c7b92516SChristoph Lameter preempt_disable();
3050c7b92516SChristoph Lameter __this_cpu_dec(bh_accounting.nr);
30511da177e4SLinus Torvalds recalc_bh_state();
3052c7b92516SChristoph Lameter preempt_enable();
30531da177e4SLinus Torvalds }
30541da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
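/*
 * Illustrative pairing (a sketch; error handling elided): buffer_heads
 * allocated here must be freed with free_buffer_head(), and only after
 * they have been removed from any inode's b_assoc_buffers list:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	...
 *	free_buffer_head(bh);
 */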
30551da177e4SLinus Torvalds
3056fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
30571da177e4SLinus Torvalds {
30581da177e4SLinus Torvalds int i;
30591da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30601da177e4SLinus Torvalds
30611da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) {
30621da177e4SLinus Torvalds brelse(b->bhs[i]);
30631da177e4SLinus Torvalds b->bhs[i] = NULL;
30641da177e4SLinus Torvalds }
3065c7b92516SChristoph Lameter this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
30668a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0;
3067fc4d24c9SSebastian Andrzej Siewior return 0;
30681da177e4SLinus Torvalds }
30691da177e4SLinus Torvalds
3070389d1b08SAneesh Kumar K.V /**
3071a6b91919SRandy Dunlap * bh_uptodate_or_lock - Test whether the buffer is uptodate
3072389d1b08SAneesh Kumar K.V * @bh: struct buffer_head
3073389d1b08SAneesh Kumar K.V *
3074389d1b08SAneesh Kumar K.V * Return 1 if the buffer is up-to-date, or 0 with the buffer
3075389d1b08SAneesh Kumar K.V * left locked if it is not.
3076389d1b08SAneesh Kumar K.V */
3077389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3078389d1b08SAneesh Kumar K.V {
3079389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) {
3080389d1b08SAneesh Kumar K.V lock_buffer(bh);
3081389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh))
3082389d1b08SAneesh Kumar K.V return 0;
3083389d1b08SAneesh Kumar K.V unlock_buffer(bh);
3084389d1b08SAneesh Kumar K.V }
3085389d1b08SAneesh Kumar K.V return 1;
3086389d1b08SAneesh Kumar K.V }
3087389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
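/*
 * Illustrative caller pattern (a sketch): on a zero return the buffer
 * comes back locked and not up-to-date, so the caller must either fill
 * it and mark it up-to-date, or submit a read, and then unlock:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *	}
 */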
3088389d1b08SAneesh Kumar K.V
3089389d1b08SAneesh Kumar K.V /**
3090fdee117eSZhang Yi * __bh_read - Submit read for a locked buffer
3091389d1b08SAneesh Kumar K.V * @bh: struct buffer_head
3092fdee117eSZhang Yi * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3093fdee117eSZhang Yi * @wait: wait for the read to complete
3094389d1b08SAneesh Kumar K.V *
3095fdee117eSZhang Yi * Returns zero on success (or when not waiting), and -EIO on read error.
3096389d1b08SAneesh Kumar K.V */
3097fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3098389d1b08SAneesh Kumar K.V {
3099fdee117eSZhang Yi int ret = 0;
3100389d1b08SAneesh Kumar K.V
3101fdee117eSZhang Yi BUG_ON(!buffer_locked(bh));
3102389d1b08SAneesh Kumar K.V
3103389d1b08SAneesh Kumar K.V get_bh(bh);
3104389d1b08SAneesh Kumar K.V bh->b_end_io = end_buffer_read_sync;
3105fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh);
3106fdee117eSZhang Yi if (wait) {
3107389d1b08SAneesh Kumar K.V wait_on_buffer(bh);
3108fdee117eSZhang Yi if (!buffer_uptodate(bh))
3109fdee117eSZhang Yi ret = -EIO;
3110389d1b08SAneesh Kumar K.V }
3111fdee117eSZhang Yi return ret;
3112fdee117eSZhang Yi }
3113fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
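/*
 * Illustrative sketch: a synchronous single-buffer read built from the
 * two helpers above (this mirrors the bh_read() wrapper in
 * <linux/buffer_head.h>):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		int err = __bh_read(bh, 0, true);
 *		if (err)
 *			return err;
 *	}
 */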
3114fdee117eSZhang Yi
3115fdee117eSZhang Yi /**
3116fdee117eSZhang Yi * __bh_read_batch - Submit read for a batch of unlocked buffers
3117fdee117eSZhang Yi * @nr: number of buffers in the batch
3118fdee117eSZhang Yi * @bhs: a batch of struct buffer_head
3119fdee117eSZhang Yi * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3120fdee117eSZhang Yi * @force_lock: if set, block until each buffer's lock is acquired;
3121fdee117eSZhang Yi * otherwise skip any buffer whose lock cannot be taken immediately.
3122fdee117eSZhang Yi *
3123fdee117eSZhang Yi * Completion is signalled per buffer via end_buffer_read_sync().
3124fdee117eSZhang Yi */
3125fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[],
3126fdee117eSZhang Yi blk_opf_t op_flags, bool force_lock)
3127fdee117eSZhang Yi {
3128fdee117eSZhang Yi int i;
3129fdee117eSZhang Yi
3130fdee117eSZhang Yi for (i = 0; i < nr; i++) {
3131fdee117eSZhang Yi struct buffer_head *bh = bhs[i];
3132fdee117eSZhang Yi
3133fdee117eSZhang Yi if (buffer_uptodate(bh))
3134fdee117eSZhang Yi continue;
3135fdee117eSZhang Yi
3136fdee117eSZhang Yi if (force_lock)
3137fdee117eSZhang Yi lock_buffer(bh);
3138fdee117eSZhang Yi else
3139fdee117eSZhang Yi if (!trylock_buffer(bh))
3140fdee117eSZhang Yi continue;
3141fdee117eSZhang Yi
3142fdee117eSZhang Yi if (buffer_uptodate(bh)) {
3143fdee117eSZhang Yi unlock_buffer(bh);
3144fdee117eSZhang Yi continue;
3145fdee117eSZhang Yi }
3146fdee117eSZhang Yi
3147fdee117eSZhang Yi bh->b_end_io = end_buffer_read_sync;
3148fdee117eSZhang Yi get_bh(bh);
3149fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh);
3150fdee117eSZhang Yi }
3151fdee117eSZhang Yi }
3152fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch);
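/*
 * Illustrative sketch (hypothetical caller): opportunistic read-ahead of
 * a run of metadata buffers.  With force_lock false, the batch quietly
 * skips any buffer another thread is already reading:
 *
 *	struct buffer_head *bhs[8];
 *	... fill bhs[] via sb_getblk() or similar ...
 *	__bh_read_batch(8, bhs, REQ_RAHEAD, false);
 */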
3153389d1b08SAneesh Kumar K.V
31541da177e4SLinus Torvalds void __init buffer_init(void)
31551da177e4SLinus Torvalds {
315643be594aSZhang Yanfei unsigned long nrpages;
3157fc4d24c9SSebastian Andrzej Siewior int ret;
31581da177e4SLinus Torvalds
3159de8a3207SKunwu Chan bh_cachep = KMEM_CACHE(buffer_head,
3160c997d683SChengming Zhou SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
31611da177e4SLinus Torvalds /*
31621da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL
31631da177e4SLinus Torvalds */
31641da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100;
31651da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
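	/*
	 * Worked example (sizes approximate): with 4KiB pages and a
	 * struct buffer_head of roughly 100 bytes, each page holds ~40
	 * buffer_heads, so the limit computed above lets buffer_heads
	 * themselves occupy at most ~10% of low memory before
	 * buffer_heads_over_limit trips and writeback starts stripping
	 * them.
	 */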
3166fc4d24c9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3167fc4d24c9SSebastian Andrzej Siewior NULL, buffer_exit_cpu_dead);
3168fc4d24c9SSebastian Andrzej Siewior WARN_ON(ret < 0);
31691da177e4SLinus Torvalds }