/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values. See block/blk-core.c:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
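
/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * multipath completion handler could use blk_path_error() to decide
 * whether resubmitting a failed request on another path is worthwhile.
 * The helper name below is made up for illustration only.
 */
static inline bool example_should_retry_on_other_path(blk_status_t error)
{
	/*
	 * Target-side failures such as BLK_STS_MEDIUM will fail on every
	 * path, so only retry when the error may be path related.
	 */
	return error != BLK_STS_OK && blk_path_error(error);
}
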
struct blk_issue_stat {
	u64 stat;
};

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET
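
/*
 * Example (illustrative sketch, not part of this header): bio_reset() in
 * block/bio.c clears the bio up to BIO_RESET_BYTES and keeps only the
 * flag bits at or above BIO_RESET_BITS, which is exactly the bvec pool
 * index.  The helper below models which flag bits survive a reset; the
 * name is made up for illustration only.
 */
static inline unsigned short example_flags_surviving_reset(const struct bio *bio)
{
	/* Bits below BIO_RESET_BITS are cleared; BVEC_POOL_IDX() remains. */
	return bio->bi_flags & ~((1U << BIO_RESET_BITS) - 1);
}
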
typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	/* for driver use */
	__REQ_DRV,

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_DRV			(1ULL << __REQ_DRV)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
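
/*
 * Example (illustrative sketch, not part of this header): a synchronous
 * FUA write shows how the low REQ_OP_BITS and the flag bits above them
 * combine in bi_opf, and how the helpers above take them apart.  The
 * function name is made up for illustration only.
 */
static inline bool example_opf_roundtrip(void)
{
	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

	/*
	 * (opf & REQ_OP_MASK) recovers REQ_OP_WRITE; op_is_write() is
	 * true because REQ_OP_WRITE has bit 0 set; op_is_flush() and
	 * op_is_sync() both trigger on the REQ_FUA flag.
	 */
	return (opf & REQ_OP_MASK) == REQ_OP_WRITE &&
		op_is_write(opf) && op_is_flush(opf) && op_is_sync(opf);
}
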
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}

struct blk_rq_stat {
	u64 mean;	/* mean of the collected samples */
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;	/* sum of new samples not yet folded into mean */
};

#endif /* __LINUX_BLK_TYPES_H */
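
/*
 * Example (illustrative sketch, not part of this header): packing a tag
 * and a hardware queue number into a blk_qc_t cookie and taking it apart
 * again.  Kept under "#if 0" so it is never built and stays harmless
 * outside the include guard above; the function name is made up.
 */
#if 0
static bool example_qc_roundtrip(void)
{
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3, true);

	/*
	 * The low BLK_QC_T_SHIFT bits hold the tag, the bits above them
	 * the queue number, and bit 31 marks internal (scheduler) tags.
	 */
	return blk_qc_t_to_tag(cookie) == 42 &&
		blk_qc_t_to_queue_num(cookie) == 3 &&
		blk_qc_t_is_internal(cookie) &&
		blk_qc_t_valid(cookie);
}
#endif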