/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core.c:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

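/*
 * Illustrative sketch, not part of this header: a completion path might
 * classify bi_status values like this.  The helper name and the retry
 * policy are hypothetical examples, not kernel API.
 */
static inline bool example_status_is_retryable(blk_status_t status)
{
	/* transient transport/resource conditions are plausible retries */
	return status == BLK_STS_TRANSPORT ||
	       status == BLK_STS_RESOURCE ||
	       status == BLK_STS_AGAIN;
}
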
struct blk_issue_stat {
	u64 stat;
};

/*
 * The main unit of I/O for the block layer and lower layers (i.e. drivers
 * and stacking drivers).
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_status_t		bi_status;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc. and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

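/*
 * Illustrative sketch, not part of this header: what bio_reset() in
 * block/bio.c conceptually does with BIO_RESET_BYTES: wipe everything
 * before bi_max_vecs and preserve the rest.  Simplified; the real
 * bio_reset() also restores the preserved flag bits and __bi_remaining.
 * Assumes memset() from <linux/string.h>.
 */
static inline void example_bio_reset_head(struct bio *bio)
{
	memset(bio, 0, BIO_RESET_BYTES);	/* clears fields up to bi_max_vecs */
}
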
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

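/*
 * Illustrative sketch, not part of this header: the BIO_* values above
 * are bit numbers into bi_flags, not masks.  bio.h provides bio_flagged()
 * for this test; the helper below is a hypothetical example.
 */
static inline bool example_bio_is_cloned(struct bio *bio)
{
	return bio->bi_flags & (1U << BIO_CLONED);
}
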
/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
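
/*
 * Illustrative sketch, not part of this header: how a pool index is
 * packed into the top BVEC_POOL_BITS of bi_flags so that BVEC_POOL_IDX()
 * above can decode it.  The helper name is a hypothetical example.
 */
static inline void example_set_bvec_pool(struct bio *bio, unsigned int idx)
{
	/* stored as idx + 1 so that 0 means "no bvecs to free" */
	bio->bi_flags &= ~(((1U << BVEC_POOL_BITS) - 1) << BVEC_POOL_OFFSET);
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
}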

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

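/*
 * Illustrative sketch, not part of this header: bi_opf (and a request's
 * cmd_flags) splits into a REQ_OP_BITS-wide opcode and the flag bits
 * above it; the opcode's least significant bit gives the data direction.
 * The helper names are hypothetical examples.
 */
static inline unsigned int example_op_part(unsigned int opf)
{
	return opf & REQ_OP_MASK;		/* low 8 bits: the operation */
}

static inline bool example_transfers_to_device(unsigned int opf)
{
	return (opf & REQ_OP_MASK) & 1;		/* LSB set: data TO device */
}
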
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

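/*
 * Illustrative sketch, not part of this header: opcodes 32 and up are
 * reserved for SCSI passthrough and driver-private requests, which a
 * driver might separate from normal block I/O as below.  The helper name
 * is a hypothetical example.
 */
static inline bool example_op_is_passthrough(unsigned int op)
{
	return op >= REQ_OP_SCSI_IN && op < REQ_OP_LAST;
}
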
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

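/*
 * Illustrative usage, not part of this header: since bio_set_op_attrs()
 * is obsolete, new code assigns bi_opf directly.  The helper name is a
 * hypothetical example.
 */
static inline void example_mark_sync_write(struct bio *bio)
{
	/* equivalent to bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC) */
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
}
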
static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

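/*
 * Illustrative examples for op_is_sync(), not part of this header; the
 * helper below only documents the expected results.
 */
static inline void example_op_is_sync_usage(void)
{
	bool r;

	r = op_is_sync(REQ_OP_READ);			/* true: reads are always sync */
	r = op_is_sync(REQ_OP_WRITE);			/* false: plain async write */
	r = op_is_sync(REQ_OP_WRITE | REQ_SYNC);	/* true: explicitly sync */
	r = op_is_sync(REQ_OP_WRITE | REQ_FUA);		/* true: FUA implies sync */
	(void)r;
}
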
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}

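/*
 * Illustrative sketch, not part of this header: packing a (tag, queue)
 * pair into a polling cookie and decoding it again with the helpers
 * above.  The values and the helper name are hypothetical examples.
 */
static inline void example_qc_roundtrip(void)
{
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3, false);
	unsigned int queue = blk_qc_t_to_queue_num(cookie);	/* == 3 */
	unsigned int tag = blk_qc_t_to_tag(cookie);		/* == 42 */

	(void)queue;
	(void)tag;
}
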
struct blk_rq_stat {
	s64 mean;
	u64 min;
	u64 max;
	s32 nr_samples;
	s32 nr_batch;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */