/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a folio (via a folio_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which would cause
 * a costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

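/*
 * For illustration, BUFFER_FNS(Dirty, dirty) expands to set_buffer_dirty(),
 * clear_buffer_dirty() and buffer_dirty(); the setter becomes:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */
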
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}									\

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

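/*
 * For example, writeback code can claim the dirty state atomically with
 * test_clear_buffer_dirty(): at most one racing caller sees "true" and
 * goes on to write the buffer back (illustrative sketch; write_it_back()
 * is a hypothetical helper):
 *
 *	if (test_clear_buffer_dirty(bh))
 *		write_it_back(bh);
 */
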
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

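/*
 * Thanks to this barrier pairing, data written to the buffer before
 * set_buffer_uptodate() is visible to anyone who observes
 * buffer_uptodate() as true (illustrative sketch):
 *
 *	writer:	memcpy(bh->b_data, src, bh->b_size);
 *		set_buffer_uptodate(bh);
 *
 *	reader:	if (buffer_uptodate(bh))
 *			... the memcpy'd data is visible in bh->b_data
 */
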
static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)		folio_get_private(folio)

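/*
 * folio_buffers() returns the folio's first buffer_head (or NULL); the
 * rest are reached through the circular b_this_page ring.  A minimal
 * walk, assuming the caller holds the folio lock:
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	struct buffer_head *bh = head;
 *
 *	do {
 *		if (buffer_dirty(bh))
 *			nr_dirty++;
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */
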
void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
		void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
		get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned len, unsigned copied,
				struct folio *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

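/*
 * get_bh()/put_bh() adjust only the reference count (unlike brelse(),
 * put_bh() requires a non-NULL bh).  A sketch of pinning a buffer across
 * an asynchronous write, where the completion handler is expected to
 * drop the reference again:
 *
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(REQ_OP_WRITE, bh);
 */
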
/**
 * brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * Decrement a buffer_head's reference count.  If @bh is NULL, this
 * function is a no-op.
 *
 * If all buffers on a folio have zero reference count, are clean
 * and unlocked, and if the folio is unlocked and not under writeback
 * then try_to_free_buffers() may strip the buffers from the folio in
 * preparation for freeing it (sometimes, rarely, buffers are removed
 * from a folio but it ends up not being freed, and buffers may later
 * be reattached).
 *
 * Context: Any context.
 */
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

/**
 * bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * Call this function instead of brelse() if the data written to a buffer
 * no longer needs to be written back.  It will clear the buffer's dirty
 * flag so writeback of this buffer will be skipped.
 *
 * Context: Any context.
 */
static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

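/*
 * Illustrative sb_bread() usage (names hypothetical); the underlying
 * __bread_gfp() returns NULL if the block could not be read:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(out, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 */
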
static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

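/*
 * Unlike sb_bread(), sb_getblk() does not read the block from disk, so a
 * sketch like the following is only safe when the caller initialises the
 * whole block itself:
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, sb->s_blocksize);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *	brelse(bh);
 */
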
static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

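/*
 * map_bh() is the usual way for a get_block_t implementation to report a
 * resolved mapping.  A minimal sketch (fs_lookup_block() is a hypothetical
 * helper returning 0 for a hole):
 *
 *	static int fs_get_block(struct inode *inode, sector_t iblock,
 *				struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = fs_lookup_block(inode, iblock);
 *
 *		if (!phys)
 *			return 0;
 *		map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 */
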
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}

/* Returns 1 if the buffer is already uptodate, 0 on success, -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}

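/*
 * Sketch of synchronously reading a block through bh_read() (per the
 * return convention above, negative means I/O error):
 *
 *	struct buffer_head *bh = __getblk(bdev, block, size);
 *
 *	if (bh_read(bh, 0) < 0) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	... bh->b_data now contains valid data ...
 */
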
static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 *
 * Read a specified block, and return the buffer head that refers
 * to it.  The memory is allocated from the movable area so that it can
 * be migrated.  The returned buffer head has its refcount increased.
 * The caller should call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
static inline struct buffer_head *__bread(struct block_device *bdev,
		sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * get_nth_bh - Get a reference on the n'th buffer after this one.
 * @bh: The buffer to start counting from.
 * @count: How many buffers to skip.
 *
 * This is primarily useful for finding the nth buffer in a folio; in
 * that case you pass the head buffer and the byte offset in the folio
 * divided by the block size.  It can be used for other purposes, but
 * it will wrap at the end of the folio rather than returning NULL or
 * proceeding to the next folio for you.
 *
 * Return: The requested buffer with an elevated refcount.
 */
static inline __must_check
struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
{
	while (count--)
		bh = bh->b_this_page;
	get_bh(bh);
	return bh;
}

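/*
 * E.g., per the note above, the buffer covering byte @offset of a folio
 * whose buffers are all @blocksize bytes can be found with:
 *
 *	bh = get_nth_bh(folio_buffers(folio), offset / blocksize);
 */
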
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
int inode_has_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *inode);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */