/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks are to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
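
/*
 * For illustration, BUFFER_FNS(Dirty, dirty) expands to (sketch of the
 * literal macro expansion above):
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */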

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
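
/*
 * Note the difference between the plain bit helpers emitted above and
 * their mark_buffer_foo() counterparts, e.g. (illustrative sketch):
 *
 *	set_buffer_dirty(bh);	// only flips BH_Dirty in bh->b_state
 *	mark_buffer_dirty(bh);	// also dirties the page and, through the
 *				// mapping, flags the inode for writeback
 */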

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_mark_uptodate().
	 * Pairs with the acquire in buffer_uptodate().
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_test_uptodate().
	 * Pairs with smp_mb__before_atomic() in set_buffer_uptodate().
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
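
/*
 * The release/acquire pairing above matters when one CPU populates a
 * buffer and another consumes it.  A minimal sketch of the usual shape
 * (use() stands in for any consumer of the data):
 *
 *	// producer / completion side: the data stores must be visible
 *	// before BH_Uptodate is observed
 *	memcpy(bh->b_data, src, bh->b_size);
 *	set_buffer_uptodate(bh);	// barrier, then set the bit
 *
 *	// consumer side: the acquire guarantees that if the bit is seen,
 *	// the data written before it is seen too
 *	if (buffer_uptodate(bh))
 *		use(bh->b_data);
 */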

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)		folio_get_private(folio)
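
/*
 * A common way to visit every buffer attached to a page is to walk the
 * circular b_this_page list, e.g. (illustrative sketch):
 *
 *	struct buffer_head *head, *bh;
 *
 *	if (page_has_buffers(page)) {
 *		bh = head = page_buffers(page);
 *		do {
 *			// inspect or update one buffer here
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 */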

void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}
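
/*
 * Typical use (sketch): after a get_block call allocates a fresh on-disk
 * block for a buffer, the caller drops any stale buffer for the same
 * block from the block device's page cache, so it cannot be written back
 * over the new data:
 *
 *	if (buffer_new(bh))
 *		clean_bdev_bh_alias(bh);
 */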

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
		  gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(blk_opf_t, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
int submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
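
/*
 * A minimal ->page_mkwrite() built on the two helpers above might look
 * like this (illustrative sketch; my_page_mkwrite and my_get_block are
 * hypothetical stand-ins for a filesystem's own functions):
 *
 *	static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf, my_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */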
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}
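
/*
 * get_bh()/put_bh() pin a buffer_head across an operation, e.g. while
 * I/O is in flight (illustrative sketch):
 *
 *	get_bh(bh);			// take a reference before submission
 *	submit_bh(REQ_OP_READ, bh);
 *	// the matching put_bh() runs in the I/O completion handler,
 *	// e.g. end_buffer_read_sync()
 */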

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
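
/*
 * The classic synchronous read pattern (illustrative sketch):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	// bh->b_data now holds sb->s_blocksize bytes of the block
 *	brelse(bh);		// drop the reference when done
 */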

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
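
/*
 * map_bh() is the usual way a get_block_t implementation publishes a
 * disk mapping.  A toy example (illustrative sketch; my_get_block and
 * its 1:1 block mapping are made up):
 *
 *	static int my_get_block(struct inode *inode, sector_t iblock,
 *				struct buffer_head *bh, int create)
 *	{
 *		map_bh(bh, inode->i_sb, iblock);	// identity mapping
 *		return 0;
 *	}
 */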

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
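
/*
 * The usual modify-under-lock pattern (illustrative sketch):
 *
 *	lock_buffer(bh);
 *	// ... modify bh->b_data ...
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */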

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * __bread() - read a block and return the buffer_head that contains it
 * @bdev: the block_device to read from
 * @block: number of the block
 * @size: size (in bytes) to read
 *
 * Reads the specified block and returns the buffer_head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */