/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loops.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core.c:blk_errors for the
 * details.
 */
typedef u8 __bitwise blk_status_t;
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

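/*
 * Illustrative sketch, not part of the original header: blk_status_t is
 * deliberately not errno based, so code on the boundary translates.
 * blk_status_to_errno()/errno_to_blk_status() are provided by the block
 * core (declared outside this header); the hypothetical mapping below
 * just shows the idea and assumes <linux/errno.h> is available.
 */
static inline blk_status_t example_errno_to_status(int err)
{
	switch (err) {
	case 0:
		return BLK_STS_OK;
	case -ENOSPC:		/* device has run out of space */
		return BLK_STS_NOSPC;
	case -ETIMEDOUT:	/* command timed out */
		return BLK_STS_TIMEOUT;
	default:		/* catch-all I/O error */
		return BLK_STS_IOERR;
	}
}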
struct blk_issue_stat {
	u64 stat;
};

/*
 * Main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers).
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	u8			bi_partno;
	blk_status_t		bi_status;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc. and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
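
/*
 * Illustrative sketch, not part of the original header: a minimal
 * ->bi_end_io completion handler.  It only touches fields declared in
 * struct bio above; struct example_wait, complete() and the implied
 * #include <linux/completion.h> are assumptions made for the example.
 */
struct example_wait {
	struct completion	done;
	blk_status_t		status;
};

static inline void example_bio_end_io(struct bio *bio)
{
	struct example_wait *w = bio->bi_private;	/* set at submit time */

	w->status = bio->bi_status;	/* one of the BLK_STS_* codes above */
	complete(&w->done);		/* wake the submitting context */
}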

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
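
/*
 * Illustrative sketch, not part of the original header: the pool index
 * is stored in the top BVEC_POOL_BITS of bi_flags, biased by one so
 * that zero means "no bvecs to free".  A hypothetical setter mirroring
 * the BVEC_POOL_IDX() accessor above could look like this.
 */
static inline void example_set_bvec_pool(struct bio *bio, unsigned int idx)
{
	bio->bi_flags &= (1U << BVEC_POOL_OFFSET) - 1;	/* clear old index */
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;	/* store idx + 1 */
}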

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
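
/*
 * Illustrative sketch, not part of the original header: the opcode and
 * the flags are simply OR'ed into one word, with the opcode in the low
 * REQ_OP_BITS and all flag bits above it.  example_make_opf() is a
 * hypothetical helper that shows the layout; real code usually just
 * writes the OR expression directly.
 */
static inline unsigned int example_make_opf(unsigned int op, unsigned int flags)
{
	return (op & REQ_OP_MASK) | flags;	/* e.g. REQ_OP_WRITE | REQ_SYNC */
}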

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* don't wait if request will block */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	/* for driver use */
	__REQ_DRV,

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_DRV			(1ULL << __REQ_DRV)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
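
/*
 * Illustrative sketch, not part of the original header: the composite
 * masks above let callers test a whole group of bits at once; the
 * hypothetical helper below checks for any of the three failfast bits.
 */
static inline bool example_is_failfast(unsigned int opf)
{
	return (opf & REQ_FAILFAST_MASK) != 0;	/* any failfast bit set? */
}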

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
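
/*
 * Illustrative note, not part of the original header: instead of the
 * obsolete helper above, new code is expected to assign bi_opf
 * directly, e.g.:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 */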

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
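
/*
 * Illustrative sketch, not part of the original header: how the
 * predicates above combine on a flagged write.  For
 * REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA, op_is_write(), op_is_flush()
 * and op_is_sync() all return true.
 */
static inline bool example_needs_flush_machinery(struct bio *bio)
{
	return op_is_write(bio->bi_opf) && op_is_flush(bio->bi_opf);
}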

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
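
/*
 * Illustrative sketch, not part of the original header: a cookie packs
 * the hardware queue number above BLK_QC_T_SHIFT and the tag below it,
 * so a value built by blk_tag_to_qc_t() round-trips through the
 * accessors above.
 */
static inline void example_qc_roundtrip(void)
{
	blk_qc_t c = blk_tag_to_qc_t(42, 3, false);	/* tag 42, queue 3 */

	/* blk_qc_t_to_tag(c) == 42, blk_qc_t_to_queue_num(c) == 3 */
	/* blk_qc_t_is_internal(c) == false, blk_qc_t_valid(c) == true */
	(void)c;
}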

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */