/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	unsigned int		bi_comp_cpu;	/* completion CPU */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */
};
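
/*
 * Example (illustrative sketch, not part of the kernel API): how a
 * submitter typically fills in the fields above before handing the bio to
 * the block layer. Assumes the bio was already allocated (see bio_alloc()
 * below) and its pages added with bio_add_page().
 */
static inline void example_prep_bio(struct bio *bio, struct block_device *bdev,
				    sector_t sector, bio_end_io_t *done,
				    void *private)
{
	bio->bi_bdev = bdev;		/* target device */
	bio->bi_sector = sector;	/* start address, in 512-byte sectors */
	bio->bi_end_io = done;		/* completion callback */
	bio->bi_private = private;	/* handed back to done() */
}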

/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED 6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
#define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

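/*
 * Example (illustrative sketch, not part of the kernel API): a completion
 * callback typically tests BIO_UPTODATE via bio_flagged() to tell success
 * from failure; "error" is the errno passed to bio_endio(), 0 on success.
 */
static inline int example_bio_error(struct bio *bio, int error)
{
	if (!bio_flagged(bio, BIO_UPTODATE))
		return error ? error : -EIO;	/* flag clear: I/O failed */

	return 0;				/* I/O completed OK */
}
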
/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- data direction
 *	If not set, bio is a read from device. If set, it's a write to device.
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 *	Insert a serialization point in the IO queue, forcing previously
 *	submitted IO to be completed before this one is issued.
 * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
 *	Note that this does NOT indicate that the IO itself is sync, just
 *	that the block layer will not postpone issue of this IO by plugging.
 * bit 4 -- metadata request
 *	Used for tracing to differentiate metadata and data IO. May also
 *	get some preferential treatment in the IO scheduler.
 * bit 5 -- discard sectors
 *	Informs the lower level device that this range of sectors is no longer
 *	used by the file system and may thus be freed by the device. Used
 *	for flash based storage.
 * bit 6 -- fail fast device errors
 * bit 7 -- fail fast transport errors
 * bit 8 -- fail fast driver errors
 *	Don't want driver retries for any of the fail fast cases, whatever
 *	the reason.
 */
#define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER	2
#define BIO_RW_SYNC	3
#define BIO_RW_META	4
#define BIO_RW_DISCARD	5
#define BIO_RW_FAILFAST_DEV		6
#define BIO_RW_FAILFAST_TRANSPORT	7
#define BIO_RW_FAILFAST_DRIVER		8

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {			\
	WARN_ON(prio >= (1 << IOPRIO_BITS));			\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);		\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

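/*
 * Example (illustrative sketch): tagging a bio with a best-effort I/O
 * priority. IOPRIO_PRIO_VALUE() and IOPRIO_CLASS_BE come from
 * linux/ioprio.h, which this header already includes.
 */
static inline void example_set_be_prio(struct bio *bio, int level)
{
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, level));
}
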
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast_dev(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
#define bio_failfast_transport(bio)	\
	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))

static inline unsigned int bio_cur_sectors(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len >> 9;
	else /* dataless requests such as discard */
		return bio->bi_size >> 9;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)

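/*
 * Example (illustrative sketch): walking the pending segments of a bio
 * with bio_for_each_segment() and summing their lengths; for a bio that
 * has not been advanced, the total equals bi_size.
 */
static inline unsigned int example_bio_bytes(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned int bytes = 0;
	int i;

	bio_for_each_segment(bvec, bio, i)
		bytes += bvec->bv_len;		/* length of this segment */

	return bytes;
}
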
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then already be freed memory by the time
 * the if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	int			bip_error;	/* saved I/O error */
	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 *   in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);

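/*
 * Example (illustrative sketch): splitting a single-page bio that straddles
 * a chunk boundary, as a striping driver might. Assumes the caller wants
 * both halves submitted; generic_make_request() is declared elsewhere
 * (linux/blkdev.h in this era).
 */
static inline void example_split_and_submit(struct bio *bio, int first_sectors)
{
	struct bio_pair *bp = bio_split(bio, first_sectors);

	generic_make_request(&bp->bio1);	/* first first_sectors sectors */
	generic_make_request(&bp->bio2);	/* the remainder */
	bio_pair_release(bp);			/* drop our reference on the pair */
}
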
extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_kmalloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);

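/*
 * Example (illustrative sketch): building a multi-page bio with
 * bio_add_page(), which returns the number of bytes actually added (less
 * than requested once queue limits are reached). The pages[] array and
 * npages parameter are assumptions of this example.
 */
static inline struct bio *example_build_bio(struct block_device *bdev,
					    sector_t sector,
					    struct page **pages, int npages)
{
	struct bio *bio = bio_alloc(GFP_NOIO, npages);
	int i;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;

	for (i = 0; i < npages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;			/* bio is full, stop adding */

	return bio;
}
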
/*
 * Allow queuer to specify a completion CPU for this bio
 */
static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
{
	bio->bi_comp_cpu = cpu;
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6

struct bio_set {
	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif

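/*
 * Example (illustrative sketch): copying a bvec's contents out under the
 * irq-safe mapping above. Works with both the highmem and non-highmem
 * definitions; memcpy() is assumed available via linux/string.h.
 */
static inline void example_copy_from_bvec(struct bio_vec *bvec, void *dst)
{
	unsigned long flags;
	char *buf = bvec_kmap_irq(bvec, &flags);	/* irqs are now off */

	memcpy(dst, buf, bvec->bv_len);
	bvec_kunmap_irq(buf, &flags);			/* irqs restored */
}
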
static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

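/*
 * Example (illustrative sketch): walking the remaining integrity vectors
 * of a payload with bip_for_each_vec(), mirroring bio_for_each_segment()
 * above.
 */
static inline unsigned int example_bip_bytes(struct bio_integrity_payload *bip)
{
	struct bio_vec *iv;
	unsigned int bytes = 0;
	unsigned short i;

	bip_for_each_vec(iv, bip, i)
		bytes += iv->bv_len;		/* integrity bytes in this vec */

	return bytes;
}
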
#define bio_integrity(bio) ((bio)->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init_slab()	do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */