1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Block data types and constants. Directly include this file only to 4 * break include dependency loop. 5 */ 6 #ifndef __LINUX_BLK_TYPES_H 7 #define __LINUX_BLK_TYPES_H 8 9 #include <linux/types.h> 10 #include <linux/bvec.h> 11 #include <linux/ktime.h> 12 13 struct bio_set; 14 struct bio; 15 struct bio_integrity_payload; 16 struct page; 17 struct block_device; 18 struct io_context; 19 struct cgroup_subsys_state; 20 typedef void (bio_end_io_t) (struct bio *); 21 22 /* 23 * Block error status values. See block/blk-core:blk_errors for the details. 24 * Alpha cannot write a byte atomically, so we need to use 32-bit value. 25 */ 26 #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) 27 typedef u32 __bitwise blk_status_t; 28 #else 29 typedef u8 __bitwise blk_status_t; 30 #endif 31 #define BLK_STS_OK 0 32 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) 33 #define BLK_STS_TIMEOUT ((__force blk_status_t)2) 34 #define BLK_STS_NOSPC ((__force blk_status_t)3) 35 #define BLK_STS_TRANSPORT ((__force blk_status_t)4) 36 #define BLK_STS_TARGET ((__force blk_status_t)5) 37 #define BLK_STS_NEXUS ((__force blk_status_t)6) 38 #define BLK_STS_MEDIUM ((__force blk_status_t)7) 39 #define BLK_STS_PROTECTION ((__force blk_status_t)8) 40 #define BLK_STS_RESOURCE ((__force blk_status_t)9) 41 #define BLK_STS_IOERR ((__force blk_status_t)10) 42 43 /* hack for device mapper, don't use elsewhere: */ 44 #define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) 45 46 #define BLK_STS_AGAIN ((__force blk_status_t)12) 47 48 /* 49 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if 50 * device related resources are unavailable, but the driver can guarantee 51 * that the queue will be rerun in the future once resources become 52 * available again. This is typically the case for device specific 53 * resources that are consumed for IO. If the driver fails allocating these 54 * resources, we know that inflight (or pending) IO will free these 55 * resource upon completion. 56 * 57 * This is different from BLK_STS_RESOURCE in that it explicitly references 58 * a device specific resource. For resources of wider scope, allocation 59 * failure can happen without having pending IO. This means that we can't 60 * rely on request completions freeing these resources, as IO may not be in 61 * flight. Examples of that are kernel memory allocations, DMA mappings, or 62 * any other system wide resources. 63 */ 64 #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) 65 66 /** 67 * blk_path_error - returns true if error may be path related 68 * @error: status the request was completed with 69 * 70 * Description: 71 * This classifies block error status into non-retryable errors and ones 72 * that may be successful if retried on a failover path. 73 * 74 * Return: 75 * %false - retrying failover path will not help 76 * %true - may succeed if retried 77 */ 78 static inline bool blk_path_error(blk_status_t error) 79 { 80 switch (error) { 81 case BLK_STS_NOTSUPP: 82 case BLK_STS_NOSPC: 83 case BLK_STS_TARGET: 84 case BLK_STS_NEXUS: 85 case BLK_STS_MEDIUM: 86 case BLK_STS_PROTECTION: 87 return false; 88 } 89 90 /* Anything else could be a path failure, so should be retried */ 91 return true; 92 } 93 94 /* 95 * From most significant bit: 96 * 1 bit: reserved for other usage, see below 97 * 12 bits: original size of bio 98 * 51 bits: issue time of bio 99 */ 100 #define BIO_ISSUE_RES_BITS 1 101 #define BIO_ISSUE_SIZE_BITS 12 102 #define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS) 103 #define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS) 104 #define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1) 105 #define BIO_ISSUE_SIZE_MASK \ 106 (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT) 107 #define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1)) 108 109 /* Reserved bit for blk-throtl */ 110 #define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63) 111 112 struct bio_issue { 113 u64 value; 114 }; 115 116 static inline u64 __bio_issue_time(u64 time) 117 { 118 return time & BIO_ISSUE_TIME_MASK; 119 } 120 121 static inline u64 bio_issue_time(struct bio_issue *issue) 122 { 123 return __bio_issue_time(issue->value); 124 } 125 126 static inline sector_t bio_issue_size(struct bio_issue *issue) 127 { 128 return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT); 129 } 130 131 static inline void bio_issue_init(struct bio_issue *issue, 132 sector_t size) 133 { 134 size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1; 135 issue->value = ((issue->value & BIO_ISSUE_RES_MASK) | 136 (ktime_get_ns() & BIO_ISSUE_TIME_MASK) | 137 ((u64)size << BIO_ISSUE_SIZE_SHIFT)); 138 } 139 140 /* 141 * main unit of I/O for the block layer and lower layers (ie drivers and 142 * stacking drivers) 143 */ 144 struct bio { 145 struct bio *bi_next; /* request queue link */ 146 struct gendisk *bi_disk; 147 unsigned int bi_opf; /* bottom bits req flags, 148 * top bits REQ_OP. Use 149 * accessors. 150 */ 151 unsigned short bi_flags; /* status, etc and bvec pool number */ 152 unsigned short bi_ioprio; 153 unsigned short bi_write_hint; 154 blk_status_t bi_status; 155 u8 bi_partno; 156 157 /* Number of segments in this BIO after 158 * physical address coalescing is performed. 159 */ 160 unsigned int bi_phys_segments; 161 162 /* 163 * To keep track of the max segment size, we account for the 164 * sizes of the first and last mergeable segments in this bio. 165 */ 166 unsigned int bi_seg_front_size; 167 unsigned int bi_seg_back_size; 168 169 struct bvec_iter bi_iter; 170 171 atomic_t __bi_remaining; 172 bio_end_io_t *bi_end_io; 173 174 void *bi_private; 175 #ifdef CONFIG_BLK_CGROUP 176 /* 177 * Represents the association of the css and request_queue for the bio. 178 * If a bio goes direct to device, it will not have a blkg as it will 179 * not have a request_queue associated with it. The reference is put 180 * on release of the bio. 181 */ 182 struct blkcg_gq *bi_blkg; 183 struct bio_issue bi_issue; 184 #endif 185 union { 186 #if defined(CONFIG_BLK_DEV_INTEGRITY) 187 struct bio_integrity_payload *bi_integrity; /* data integrity */ 188 #endif 189 }; 190 191 unsigned short bi_vcnt; /* how many bio_vec's */ 192 193 /* 194 * Everything starting with bi_max_vecs will be preserved by bio_reset() 195 */ 196 197 unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ 198 199 atomic_t __bi_cnt; /* pin count */ 200 201 struct bio_vec *bi_io_vec; /* the actual vec list */ 202 203 struct bio_set *bi_pool; 204 205 /* 206 * We can inline a number of vecs at the end of the bio, to avoid 207 * double allocations for a small number of bio_vecs. This member 208 * MUST obviously be kept at the very end of the bio. 209 */ 210 struct bio_vec bi_inline_vecs[0]; 211 }; 212 213 #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) 214 215 /* 216 * bio flags 217 */ 218 #define BIO_SEG_VALID 1 /* bi_phys_segments valid */ 219 #define BIO_CLONED 2 /* doesn't own data */ 220 #define BIO_BOUNCED 3 /* bio is a bounce bio */ 221 #define BIO_USER_MAPPED 4 /* contains user pages */ 222 #define BIO_NULL_MAPPED 5 /* contains invalid user pages */ 223 #define BIO_QUIET 6 /* Make BIO Quiet */ 224 #define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ 225 #define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ 226 #define BIO_THROTTLED 9 /* This bio has already been subjected to 227 * throttling rules. Don't do it again. */ 228 #define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion 229 * of this bio. */ 230 #define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */ 231 #define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */ 232 233 /* See BVEC_POOL_OFFSET below before adding new flags */ 234 235 /* 236 * We support 6 different bvec pools, the last one is magic in that it 237 * is backed by a mempool. 238 */ 239 #define BVEC_POOL_NR 6 240 #define BVEC_POOL_MAX (BVEC_POOL_NR - 1) 241 242 /* 243 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add 244 * 1 to the actual index so that 0 indicates that there are no bvecs to be 245 * freed. 246 */ 247 #define BVEC_POOL_BITS (3) 248 #define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) 249 #define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) 250 #if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1) 251 # error "BVEC_POOL_BITS is too small" 252 #endif 253 254 /* 255 * Flags starting here get preserved by bio_reset() - this includes 256 * only BVEC_POOL_IDX() 257 */ 258 #define BIO_RESET_BITS BVEC_POOL_OFFSET 259 260 typedef __u32 __bitwise blk_mq_req_flags_t; 261 262 /* 263 * Operations and flags common to the bio and request structures. 264 * We use 8 bits for encoding the operation, and the remaining 24 for flags. 265 * 266 * The least significant bit of the operation number indicates the data 267 * transfer direction: 268 * 269 * - if the least significant bit is set transfers are TO the device 270 * - if the least significant bit is not set transfers are FROM the device 271 * 272 * If a operation does not transfer data the least significant bit has no 273 * meaning. 274 */ 275 #define REQ_OP_BITS 8 276 #define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) 277 #define REQ_FLAG_BITS 24 278 279 enum req_opf { 280 /* read sectors from the device */ 281 REQ_OP_READ = 0, 282 /* write sectors to the device */ 283 REQ_OP_WRITE = 1, 284 /* flush the volatile write cache */ 285 REQ_OP_FLUSH = 2, 286 /* discard sectors */ 287 REQ_OP_DISCARD = 3, 288 /* securely erase sectors */ 289 REQ_OP_SECURE_ERASE = 5, 290 /* reset a zone write pointer */ 291 REQ_OP_ZONE_RESET = 6, 292 /* write the same sector many times */ 293 REQ_OP_WRITE_SAME = 7, 294 /* write the zero filled sector many times */ 295 REQ_OP_WRITE_ZEROES = 9, 296 297 /* SCSI passthrough using struct scsi_request */ 298 REQ_OP_SCSI_IN = 32, 299 REQ_OP_SCSI_OUT = 33, 300 /* Driver private requests */ 301 REQ_OP_DRV_IN = 34, 302 REQ_OP_DRV_OUT = 35, 303 304 REQ_OP_LAST, 305 }; 306 307 enum req_flag_bits { 308 __REQ_FAILFAST_DEV = /* no driver retries of device errors */ 309 REQ_OP_BITS, 310 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 311 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 312 __REQ_SYNC, /* request is sync (sync write or read) */ 313 __REQ_META, /* metadata io request */ 314 __REQ_PRIO, /* boost priority in cfq */ 315 __REQ_NOMERGE, /* don't touch this for merging */ 316 __REQ_IDLE, /* anticipate more IO after this one */ 317 __REQ_INTEGRITY, /* I/O includes block integrity payload */ 318 __REQ_FUA, /* forced unit access */ 319 __REQ_PREFLUSH, /* request for cache flush */ 320 __REQ_RAHEAD, /* read ahead, can fail anytime */ 321 __REQ_BACKGROUND, /* background IO */ 322 __REQ_NOWAIT, /* Don't wait if request will block */ 323 324 /* command specific flags for REQ_OP_WRITE_ZEROES: */ 325 __REQ_NOUNMAP, /* do not free blocks when zeroing */ 326 327 __REQ_HIPRI, 328 329 /* for driver use */ 330 __REQ_DRV, 331 __REQ_SWAP, /* swapping request. */ 332 __REQ_NR_BITS, /* stops here */ 333 }; 334 335 #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) 336 #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) 337 #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) 338 #define REQ_SYNC (1ULL << __REQ_SYNC) 339 #define REQ_META (1ULL << __REQ_META) 340 #define REQ_PRIO (1ULL << __REQ_PRIO) 341 #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) 342 #define REQ_IDLE (1ULL << __REQ_IDLE) 343 #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) 344 #define REQ_FUA (1ULL << __REQ_FUA) 345 #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) 346 #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 347 #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) 348 #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) 349 #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) 350 #define REQ_HIPRI (1ULL << __REQ_HIPRI) 351 352 #define REQ_DRV (1ULL << __REQ_DRV) 353 #define REQ_SWAP (1ULL << __REQ_SWAP) 354 355 #define REQ_FAILFAST_MASK \ 356 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 357 358 #define REQ_NOMERGE_FLAGS \ 359 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) 360 361 enum stat_group { 362 STAT_READ, 363 STAT_WRITE, 364 STAT_DISCARD, 365 366 NR_STAT_GROUPS 367 }; 368 369 #define bio_op(bio) \ 370 ((bio)->bi_opf & REQ_OP_MASK) 371 #define req_op(req) \ 372 ((req)->cmd_flags & REQ_OP_MASK) 373 374 /* obsolete, don't use in new code */ 375 static inline void bio_set_op_attrs(struct bio *bio, unsigned op, 376 unsigned op_flags) 377 { 378 bio->bi_opf = op | op_flags; 379 } 380 381 static inline bool op_is_write(unsigned int op) 382 { 383 return (op & 1); 384 } 385 386 /* 387 * Check if the bio or request is one that needs special treatment in the 388 * flush state machine. 389 */ 390 static inline bool op_is_flush(unsigned int op) 391 { 392 return op & (REQ_FUA | REQ_PREFLUSH); 393 } 394 395 /* 396 * Reads are always treated as synchronous, as are requests with the FUA or 397 * PREFLUSH flag. Other operations may be marked as synchronous using the 398 * REQ_SYNC flag. 399 */ 400 static inline bool op_is_sync(unsigned int op) 401 { 402 return (op & REQ_OP_MASK) == REQ_OP_READ || 403 (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); 404 } 405 406 static inline bool op_is_discard(unsigned int op) 407 { 408 return (op & REQ_OP_MASK) == REQ_OP_DISCARD; 409 } 410 411 static inline int op_stat_group(unsigned int op) 412 { 413 if (op_is_discard(op)) 414 return STAT_DISCARD; 415 return op_is_write(op); 416 } 417 418 typedef unsigned int blk_qc_t; 419 #define BLK_QC_T_NONE -1U 420 #define BLK_QC_T_SHIFT 16 421 #define BLK_QC_T_INTERNAL (1U << 31) 422 423 static inline bool blk_qc_t_valid(blk_qc_t cookie) 424 { 425 return cookie != BLK_QC_T_NONE; 426 } 427 428 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) 429 { 430 return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; 431 } 432 433 static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) 434 { 435 return cookie & ((1u << BLK_QC_T_SHIFT) - 1); 436 } 437 438 static inline bool blk_qc_t_is_internal(blk_qc_t cookie) 439 { 440 return (cookie & BLK_QC_T_INTERNAL) != 0; 441 } 442 443 struct blk_rq_stat { 444 u64 mean; 445 u64 min; 446 u64 max; 447 u32 nr_samples; 448 u64 batch; 449 }; 450 451 #endif /* __LINUX_BLK_TYPES_H */ 452