/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	u8			bd_partno;
	bool			bd_write_holder;
	bool			bd_has_submit_bio;
	dev_t			bd_dev;
	struct inode		*bd_inode;	/* will die */
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif
	bool			bd_ro_warned;
	int			bd_writers;
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
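
/*
 * Illustrative sketch only: the helpers below are not part of this header's
 * API, they merely show how SECTOR_SHIFT/SECTOR_SIZE are typically used to
 * convert between byte counts and 512-byte sector units.  The helper names
 * are made up for illustration.
 */
static inline sector_t blk_types_example_bytes_to_sectors(u64 bytes)
{
	/* round down to a whole number of sectors */
	return (sector_t)(bytes >> SECTOR_SHIFT);
}

static inline u64 blk_types_example_sectors_to_bytes(sector_t nr_sects)
{
	return (u64)nr_sects << SECTOR_SHIFT;
}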

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif

#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a zone that is write-locked: a write to that
 * zone must wait, but a read from the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is
 * offline or is being taken offline. This could help differentiate the case
 * where a device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)18)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
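
/*
 * Illustrative sketch only (hypothetical driver fragment, not part of this
 * header): a blk-mq driver typically distinguishes BLK_STS_RESOURCE from
 * BLK_STS_DEV_RESOURCE in its ->queue_rq() handler.  BLK_STS_DEV_RESOURCE is
 * only safe when in-flight IO to the same device is guaranteed to free the
 * missing resource and thereby rerun the queue:
 *
 *	// inside a hypothetical ->queue_rq() implementation
 *	tag = my_device_get_hw_tag(dev);	// hypothetical helper
 *	if (tag < 0)
 *		// an in-flight command will release a tag and rerun the queue
 *		return BLK_STS_DEV_RESOURCE;
 *	if (!my_dma_map(dev, rq))		// hypothetical helper
 *		// system-wide resource, no completion guaranteed to free it
 *		return BLK_STS_RESOURCE;
 *
 * On the completion side, stacking drivers (e.g. multipath implementations)
 * can use blk_path_error() to decide whether resubmitting the IO on another
 * path could succeed:
 *
 *	if (blk_path_error(error))
 *		my_retry_on_other_path(rq);	// hypothetical helper
 */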

struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
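
/*
 * Illustrative sketch only (not part of this header): a submitter normally
 * fills in ->bi_end_io and ->bi_private before sending a bio down, and the
 * completion handler reads ->bi_status.  The allocation, submission and
 * completion helpers (bio_alloc(), submit_bio(), bio_endio(), ...) live in
 * <linux/bio.h> and <linux/blkdev.h>, and the completion primitives in
 * <linux/completion.h>; the wait-for-completion pattern below is only a
 * sketch with hypothetical names:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct completion *done = bio->bi_private;
 *
 *		if (bio->bi_status)
 *			// error reported by the lower layers
 *			my_handle_error(bio);	// hypothetical helper
 *		complete(done);
 *	}
 *
 * The same fields are also used by chained and cloned bios; the BIO_* flags
 * below carry the per-bio state bits that track such cases.
 */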

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
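
/*
 * Illustrative sketch only: a blk_opf_t carries the operation in its low
 * REQ_OP_BITS bits and the REQ_* flags (defined below) in the bits above
 * them, so masking with REQ_OP_MASK splits the two.  The helper name is
 * hypothetical and not part of this header.
 */
static inline blk_opf_t blk_types_example_opf_flags(blk_opf_t opf)
{
	/* strip the operation, keep only the flag bits */
	return opf & ~REQ_OP_MASK;
}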

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
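
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * composing an opf value and testing it, following the encoding described in
 * the enum req_op documentation above.  A synchronous FUA write would be
 * built as "REQ_OP_WRITE | REQ_SYNC | REQ_FUA"; its low REQ_OP_BITS bits
 * hold the operation (with bit 0 set, i.e. data flows TO the device) and the
 * remaining bits hold the flags.
 */
static inline bool blk_types_example_is_fua_write(blk_opf_t opf)
{
	return (opf & REQ_OP_MASK) == REQ_OP_WRITE && (opf & REQ_FUA);
}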

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */