xref: /linux-6.15/include/linux/blk_types.h (revision 4e108d4f)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a zone that is write-locked: further writes
 * to it must wait, but a read to the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

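/*
 * Illustrative sketch only, not part of this header: one way a driver's
 * submission path might pick between the resource-related statuses above.
 * The helper name and both "exhausted/failed" parameters are hypothetical.
 */
static inline blk_status_t example_submit_resource_status(bool dev_tags_exhausted,
							   bool sysmem_alloc_failed)
{
	if (dev_tags_exhausted)
		/* per-device resource: in-flight IO is guaranteed to free it */
		return BLK_STS_DEV_RESOURCE;
	if (sysmem_alloc_failed)
		/* system-wide resource: no completion is guaranteed to free it */
		return BLK_STS_RESOURCE;
	return BLK_STS_OK;
}
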
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

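/*
 * Illustrative sketch only, not part of this header: a multipath-style
 * completion handler could use blk_path_error() to decide whether a retry on
 * another path is worthwhile.  The helper name is hypothetical.
 */
static inline bool example_retry_on_other_path(blk_status_t status)
{
	/* success needs no retry; path-related errors may succeed elsewhere */
	return status != BLK_STS_OK && blk_path_error(status);
}
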
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

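/*
 * Illustrative sketch only, not part of this header: bio_issue packs the
 * issue time and the original bio size into a single u64 as laid out above.
 * The helper name and the value 8 are purely for illustration.
 */
static inline void example_bio_issue_roundtrip(void)
{
	struct bio_issue issue = { .value = 0 };
	u64 when;
	sector_t size;

	bio_issue_init(&issue, 8);		/* records "now" and 8 sectors */
	when = bio_issue_time(&issue);		/* low 51 bits of ktime_get_ns() */
	size = bio_issue_size(&issue);		/* 8, since it fits in 12 bits */
	(void)when;
	(void)size;
}
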
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req flags. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc. and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

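/*
 * Illustrative sketch only, not part of this header: the completion side of a
 * bio inspects ->bi_status, which the driver set before ->bi_end_io is called.
 * The helper name is hypothetical.
 */
static inline bool example_bio_failed(const struct bio *bio)
{
	return bio->bi_status != BLK_STS_OK;
}
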
/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put (release) the bvec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->__bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->__bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

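/*
 * Illustrative sketch only, not part of this header: the low bits of
 * ->bi_flags carry the BIO_* flags above, while the top BVEC_POOL_BITS bits
 * carry the bvec pool index plus one (0 meaning "no pool bvecs to free").
 * Both helper names are hypothetical.
 */
static inline bool example_bio_is_quiet(const struct bio *bio)
{
	return bio->bi_flags & (1U << BIO_QUIET);
}

static inline bool example_bio_has_pool_bvecs(const struct bio *bio)
{
	return BVEC_POOL_IDX(bio) != 0;
}
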
typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 8,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

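/*
 * Illustrative sketch only, not part of this header: an op from the enum
 * above lives in the low REQ_OP_BITS of a bi_opf/cmd_flags word, so masking
 * with REQ_OP_MASK recovers it no matter which REQ_* flags are also set.
 * The helper name is hypothetical.
 */
static inline enum req_opf example_extract_op(unsigned int opf)
{
	return opf & REQ_OP_MASK;
}
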
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

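/*
 * Illustrative sketch only, not part of this header: bi_opf / cmd_flags
 * values are built by OR-ing exactly one REQ_OP_* value with any number of
 * the REQ_* flags above.  The helper name is hypothetical.
 */
static inline unsigned int example_sync_fua_write_opf(void)
{
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}
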
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

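/*
 * Illustrative sketch only, not part of this header: combining bio_op() with
 * op_is_write() answers "does this bio move data to the device?".  The helper
 * name is hypothetical.
 */
static inline bool example_bio_is_data_write(const struct bio *bio)
{
	return op_is_write(bio_op(bio));
}
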
/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* STAT_READ/STAT_WRITE match the 0/1 returned by op_is_write() */
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_EAGAIN		-2U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}

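/*
 * Illustrative sketch only, not part of this header: a polling cookie packs
 * the hardware queue number above BLK_QC_T_SHIFT and the tag below it; the
 * helpers above undo that packing, so for a non-internal cookie
 * blk_qc_t_to_queue_num(example_make_cookie(q, t)) == q.  The helper name is
 * hypothetical.
 */
static inline blk_qc_t example_make_cookie(unsigned int queue_num,
					   unsigned int tag)
{
	return (queue_num << BLK_QC_T_SHIFT) | tag;
}
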
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

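/*
 * Illustrative sketch only, not part of this header: a simplified view of how
 * one latency sample could be folded into a struct blk_rq_stat.  The real
 * accumulation and mean calculation live in block/blk-stat.c; the helper name
 * here is hypothetical.
 */
static inline void example_rq_stat_add_sample(struct blk_rq_stat *stat, u64 value)
{
	if (!stat->nr_samples || value < stat->min)
		stat->min = value;
	if (!stat->nr_samples || value > stat->max)
		stat->max = value;
	stat->batch += value;		/* running sum; mean is derived later */
	stat->nr_samples++;
}
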
#endif /* __LINUX_BLK_TYPES_H */