/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk		*bd_disk;
	struct request_queue	*bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	u8			bd_partno;
	bool			bd_write_holder;
	bool			bd_has_submit_bio;
	dev_t			bd_dev;
	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	struct inode		*bd_inode;	/* will die */
	void			*bd_claiming;
	void			*bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif
	int			bd_writers;
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
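/*
 * Illustrative sketch, not part of this header's API: the sector constants
 * above make byte <-> sector conversions one-liners throughout the block
 * layer. The helper name below is hypothetical and only demonstrates the
 * intended arithmetic.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	/* round up to the next whole 512-byte sector */
	return (bytes + SECTOR_SIZE - 1) >> SECTOR_SHIFT;
}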
/*
 * Block error status values. See block/blk-core.c:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone: writes to it must wait,
 * but a read from the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)
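/*
 * Illustrative sketch, not part of the kernel API: how a driver's completion
 * path might translate a device specific error into a blk_status_t. Both the
 * function and the hw_err codes are hypothetical.
 */
static inline blk_status_t example_hw_err_to_status(int hw_err)
{
	switch (hw_err) {
	case 0:
		return BLK_STS_OK;
	case 1:		/* command timed out on the wire */
		return BLK_STS_TIMEOUT;
	case 2:		/* media error reported by the device */
		return BLK_STS_MEDIUM;
	default:	/* anything else: generic I/O error */
		return BLK_STS_IOERR;
	}
}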
/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)18)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
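/*
 * Illustrative sketch, not part of the kernel API: round-tripping a size
 * through the bio_issue packing above. The size field is capped at 12 bits
 * and the issue time is truncated to the low 51 bits of ktime_get_ns(), so
 * both must be treated as approximate. The helper is hypothetical.
 */
static inline bool example_bio_issue_roundtrip(void)
{
	struct bio_issue issue = { .value = 0 };

	/* record "now" and an 8-sector size (4096 bytes) */
	bio_issue_init(&issue, 4096 >> SECTOR_SHIFT);

	/* the size survives intact; the time field holds truncated ns */
	return bio_issue_size(&issue) == 8;
}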
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
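/*
 * Illustrative sketch, not part of the kernel API: bi_opf packs the operation
 * into the low REQ_OP_BITS bits and the REQ_* flags above them, so the two
 * halves can be separated with REQ_OP_MASK. The helper name is hypothetical;
 * the real op accessor is bio_op(), defined further down.
 */
static inline blk_opf_t example_opf_flags(blk_opf_t opf)
{
	/* strip the operation, keep only the flag bits */
	return opf & ~REQ_OP_MASK;
}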
/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};
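/*
 * Illustrative sketch, not part of the kernel API: the least-significant-bit
 * rule documented above in action. REQ_OP_WRITE (1), REQ_OP_ZONE_APPEND (13)
 * and REQ_OP_DRV_OUT (35) are odd and transfer data to the device, while
 * REQ_OP_READ (0) and REQ_OP_DRV_IN (34) are even and transfer from it. The
 * helper is hypothetical; op_is_write() below implements the same test.
 */
static inline bool example_op_sends_data(enum req_op op)
{
	/* odd operation numbers move data toward the device */
	return ((__force unsigned int)op & 1) == 1;
}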
#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* STAT_READ is 0 and STAT_WRITE is 1, matching op_is_write() */
	return op_is_write(op);
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */