/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};
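
/*
 * Illustrative sketch only (error handling trimmed): a filesystem
 * typically obtains a buffer_head for a metadata block with sb_bread(),
 * works on the data it maps, and drops its reference with brelse();
 * sb, block and buf below are the caller's:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 *
 * Both helpers are defined later in this header.
 */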
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
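
/*
 * As a concrete example of the expansion above, BUFFER_FNS(Dirty, dirty)
 * generates set_buffer_dirty() (sets BH_Dirty unless it is already set),
 * clear_buffer_dirty() (clears BH_Dirty) and buffer_dirty() (tests
 * BH_Dirty), while TAS_BUFFER_FNS(Dirty, dirty) adds
 * test_set_buffer_dirty() and test_clear_buffer_dirty(), which also
 * return the previous value of the bit.
 */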
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)	folio_get_private(folio)
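
/*
 * Illustrative sketch: the buffers attached to a folio form a circular
 * list linked through b_this_page, so a caller holding the folio lock
 * can visit each one like this (assuming folio_buffers() returned a
 * non-NULL head):
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	struct buffer_head *bh = head;
 *
 *	do {
 *		... examine bh ...
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */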
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
struct buffer_head *folio_create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);
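
/*
 * Illustrative sketch of the usual write-out pairing among the
 * declarations above: after modifying bh->b_data, mark the buffer
 * dirty, and if the block must reach stable storage before the caller
 * proceeds, write it synchronously:
 *
 *	mark_buffer_dirty(bh);
 *	if (needs_sync)
 *		err = sync_dirty_buffer(bh);
 *
 * needs_sync and err here are the caller's.
 */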
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
		get_block_t *get_block, struct writeback_control *wbc,
		bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif
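
/*
 * Sketch of how a block-based filesystem typically wraps these helpers
 * in its address_space_operations ("myfs" and myfs_get_block are
 * hypothetical names):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 * with .read_folio pointing at myfs_read_folio, .dirty_folio at
 * block_dirty_folio, and so on.
 */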
/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}
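
/*
 * Illustrative sketch: unlike sb_bread(), sb_getblk() returns the
 * buffer without reading it from disk, which suits a block that is
 * about to be completely overwritten:
 *
 *	bh = sb_getblk(sb, block);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, sb->s_blocksize);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * (sb_getblk() allocates with __GFP_NOFAIL, so no NULL check is shown.)
 */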
static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}
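
/*
 * Illustrative sketch of the non-blocking helpers above: start the read
 * early with bh_read_nowait(), do other work, and only then wait for
 * completion:
 *
 *	bh_read_nowait(bh, 0);
 *	... other work ...
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */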
/* Returns 1 if the buffer was already uptodate, 0 on success, and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - read a block and return the buffer head
 * @bdev: the block_device to read from
 * @block: number of the block
 * @size: size (in bytes) to read
 *
 * Reads the specified block and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
int inode_has_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *inode);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */