/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
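
/*
 * Illustrative sketch (not part of this header): a subsystem claims its
 * own state bits by starting a private enumeration at BH_PrivateStart,
 * e.g. a journalling layer.  The names below are hypothetical:
 *
 *	enum my_private_state_bits {
 *		BH_MyJournalled = BH_PrivateStart,
 *		BH_MyRevoked,
 *	};
 */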

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
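
/*
 * For example, with a 4096-byte PAGE_CACHE_SIZE and the minimum 512-byte
 * block size, this allows up to eight buffer_heads per page.
 */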

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};
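
/*
 * Illustrative sketch (not part of this header): a caller-supplied
 * completion handler matching bh_end_io_t, with private context carried
 * in bh->b_private as reserved above.  "my_end_io" and "struct my_ctx"
 * (assumed to embed a struct completion) are hypothetical:
 *
 *	static void my_end_io(struct buffer_head *bh, int uptodate)
 *	{
 *		struct my_ctx *ctx = bh->b_private;
 *
 *		if (uptodate)
 *			set_buffer_uptodate(bh);
 *		else
 *			clear_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		complete(&ctx->done);
 *	}
 */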

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static inline void set_buffer_##name(struct buffer_head *bh)		\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline void clear_buffer_##name(struct buffer_head *bh)		\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline int buffer_##name(const struct buffer_head *bh)		\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
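
/*
 * For instance, BUFFER_FNS(Dirty, dirty) expands to set_buffer_dirty(),
 * clear_buffer_dirty() and buffer_dirty(), all operating on BH_Dirty;
 * the last of the three comes out as:
 *
 *	static inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */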

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static inline int test_set_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static inline int test_clear_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

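/*
 * Illustrative sketch (not part of this header): the test-and-set
 * variants let callers act only on a real state transition, e.g.
 * submitting a locked buffer for writeback only if it was in fact
 * dirty, roughly what sync_dirty_buffer() does internally:
 *
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */
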
/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
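
/*
 * Illustrative sketch (not part of this header): a page's buffers are
 * walked via the circular b_this_page list, stopping when it wraps back
 * around to the head returned by page_buffers():
 *
 *	struct buffer_head *head = page_buffers(page);
 *	struct buffer_head *bh = head;
 *	int uptodate = 1;
 *
 *	do {
 *		if (!buffer_uptodate(bh))
 *			uptodate = 0;
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */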

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
			unsigned size);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int rw);
void write_dirty_buffer(struct buffer_head *bh, int rw);
int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
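
/*
 * Illustrative sketch (not part of this header): bh_uptodate_or_lock()
 * and bh_submit_read() combine into the common "read unless already
 * cached" pattern; bh_submit_read() consumes the lock taken here:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh) < 0)
 *			return -EIO;
 *	}
 */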

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc, bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
				unsigned long from);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err == -EAGAIN)
		return VM_FAULT_RETRY;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	page_cache_get(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&bh->b_count);
}
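
/*
 * Illustrative sketch (not part of this header): get_bh()/put_bh() pin a
 * buffer_head across I/O; the submitter takes a reference and the
 * completion handler (end_buffer_read_sync() here) drops it.  The buffer
 * must already be locked before submit_bh():
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 */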

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread(sb->s_bdev, block, sb->s_blocksize);
}
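
/*
 * Illustrative sketch (not part of this header): the classic metadata
 * read; sb_bread() returns an uptodate buffer or NULL, and the reference
 * is dropped with brelse() when done ("out" is a hypothetical caller
 * buffer of at least sb->s_blocksize bytes):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(out, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 *	return 0;
 */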

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
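
/*
 * Illustrative sketch (not part of this header): a filesystem's
 * get_block_t callback typically finishes with map_bh() once the logical
 * block has been resolved to a physical one.  "my_resolve_block" is a
 * hypothetical helper and the error handling is deliberately simplified:
 *
 *	static int my_get_block(struct inode *inode, sector_t iblock,
 *				struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = my_resolve_block(inode, iblock, create);
 *
 *		if (!phys)
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */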

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
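
/*
 * Illustrative sketch (not part of this header): the buffer lock brackets
 * modification of the buffer's contents; marking it dirty afterwards
 * schedules writeback ("src" is a hypothetical source of one full block):
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data, src, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */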

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */