/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	dev_t			bd_dev;
	int			bd_openers;
	struct inode		*bd_inode;	/* will die */
	struct super_block	*bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	void			*bd_claiming;
	void			*bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device	*bd_contains;
	u8			bd_partno;
	struct hd_struct	*bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;

	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	struct gendisk		*bd_disk;
	struct backing_dev_info	*bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
} __randomize_layout;

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
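
/*
 * Illustrative sketch (not part of this header): how a blk-mq driver's
 * ->queue_rq() error path might pick between the two resource errors.
 * my_dev_get_tag() and my_map_dma() are hypothetical helpers; only the
 * queue_rq() signature and the status codes come from the kernel API.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		if (!my_dev_get_tag(hctx))
 *			// Per-device resource: an inflight IO will free one
 *			// on completion and the driver reruns the queue.
 *			return BLK_STS_DEV_RESOURCE;
 *		if (!my_map_dma(bd->rq))
 *			// System-wide resource: completions may not free it,
 *			// so let the block layer back off and retry.
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */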
/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone: a write to it must wait,
 * while a read to the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
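
/*
 * Worked example (illustrative only): with the layout above, bit 63 is the
 * reserved throttle flag, bits 62..51 hold the truncated bio size, and bits
 * 50..0 hold the low 51 bits of the issue time in nanoseconds.
 *
 *	struct bio_issue issue = { 0 };
 *
 *	bio_issue_init(&issue, 8);	// 8 sectors, timestamped now
 *	// bio_issue_size(&issue) == 8
 *	// bio_issue_time(&issue) == ktime_get_ns() & BIO_ISSUE_TIME_MASK
 *
 * Sizes of (1 << 12) sectors or more are truncated to the low 12 bits, and
 * timestamps only compare correctly within one 2^51 ns (~26 day) window.
 */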
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put (release) the bvec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;
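
/*
 * Illustrative sketch (not part of this header): the bvec pool index is
 * biased by one when stored in the top BVEC_POOL_BITS of ->bi_flags, so a
 * raw value of 0 means "no bvecs to free". A setter following the documented
 * convention would look roughly like:
 *
 *	static void example_set_bvec_pool(struct bio *bio, unsigned int idx)
 *	{
 *		// Clear the top BVEC_POOL_BITS, then store idx + 1 there;
 *		// BVEC_POOL_IDX(bio) reads back the biased value.
 *		bio->bi_flags &= ~(((1 << BVEC_POOL_BITS) - 1)
 *					<< BVEC_POOL_OFFSET);
 *		bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
 *	}
 */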
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};
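
/*
 * Illustrative sketch (not part of this header): composing ->bi_opf and
 * testing the direction bit. The deliberate gaps in the op numbering above
 * keep the parity rule intact, e.g. REQ_OP_ZONE_APPEND (13) is odd and
 * transfers data TO the device.
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 *	bio_op(bio);		  // REQ_OP_WRITE: low REQ_OP_BITS only
 *	op_is_write(bio->bi_opf); // true: REQ_OP_WRITE == 1, LSB set
 *	op_is_sync(bio->bi_opf);  // true: REQ_SYNC (and REQ_FUA) set
 *
 * bio_op(), op_is_write() and op_is_sync() are the accessors defined below.
 */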
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
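
/*
 * Illustrative sketch (not part of this header): a completion cookie packs
 * the hardware queue number above BLK_QC_T_SHIFT and the tag in the low 16
 * bits, with bit 31 flagging scheduler-internal tags. hw_queue_num and tag
 * are stand-ins for values a blk-mq driver would take from its hardware
 * context and request.
 *
 *	blk_qc_t cookie = (hw_queue_num << BLK_QC_T_SHIFT) | tag;
 *
 *	blk_qc_t_to_queue_num(cookie);	// hw_queue_num
 *	blk_qc_t_to_tag(cookie);	// tag
 *	blk_qc_t_is_internal(cookie);	// false unless BLK_QC_T_INTERNAL set
 */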
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */