/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/*
	 * Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by
	 * bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 8,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
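
/*
 * A minimal illustration of the encoding above (example values only, not
 * part of this header): composing ->bi_opf for a synchronous write and
 * decoding it again.
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 *
 *	bio_op(bio)              evaluates to REQ_OP_WRITE
 *	op_is_write(bio_op(bio)) is true, since REQ_OP_WRITE (1) has the
 *	                         least significant bit set, i.e. data is
 *	                         transferred TO the device
 */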
/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
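
/*
 * A minimal illustration of the cookie layout (example values only, not
 * part of this header): the low BLK_QC_T_SHIFT bits hold the tag and the
 * high bits hold the hardware queue number, so tag 5 on queue 2
 * round-trips as follows.
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(5, 2);	value 0x20005
 *
 *	blk_qc_t_to_queue_num(cookie)	returns 2
 *	blk_qc_t_to_tag(cookie)		returns 5
 *	blk_qc_t_valid(cookie)		returns true
 */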
struct blk_issue_stat {
	u64 time;
};

#define BLK_RQ_STAT_BATCH	64

struct blk_rq_stat {
	s64 mean;
	u64 min;
	u64 max;
	s32 nr_samples;
	s32 nr_batch;
	u64 batch;
	s64 time;
};

#endif /* __LINUX_BLK_TYPES_H */