#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	queue_rq_srcu[0];
};

struct blk_mq_tag_set {
	unsigned int		*mq_map;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);

struct blk_mq_ops {
	/*
	 * Queue a new request from the block layer.
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Called on request timeout.
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	/* Called to complete the request. */
	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting up
	 * the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;

	/* Called from inside blk_get_request(). */
	void (*initialize_rq_fn)(struct request *rq);

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
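
/*
 * Illustrative sketch, not taken from this header: a minimal driver supplies
 * at least ->queue_rq(), fills in a blk_mq_tag_set and then creates a request
 * queue from it.  All my_* names below are hypothetical and error handling is
 * reduced to the essentials.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		blk_mq_start_request(bd->rq);
 *		my_hw_submit(hctx->driver_data, bd->rq);   // hand off to hardware
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 *
 *	static int my_init_queue(struct my_dev *dev)
 *	{
 *		struct blk_mq_tag_set *set = &dev->tag_set;
 *		int err;
 *
 *		memset(set, 0, sizeof(*set));
 *		set->ops		= &my_mq_ops;
 *		set->nr_hw_queues	= 1;
 *		set->queue_depth	= 64;
 *		set->cmd_size		= sizeof(struct my_cmd);
 *		set->numa_node		= NUMA_NO_NODE;
 *		set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *		err = blk_mq_alloc_tag_set(set);
 *		if (err)
 *			return err;
 *
 *		dev->queue = blk_mq_init_queue(set);
 *		if (IS_ERR(dev->queue)) {
 *			blk_mq_free_tag_set(set);
 *			return PTR_ERR(dev->queue);
 *		}
 *		return 0;
 *	}
 *
 * The request is typically finished later, e.g. from the driver's interrupt
 * handler, via blk_mq_complete_request() or blk_mq_end_request().
 */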

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,
	BLK_MQ_S_TAG_WAITING	= 3,
	BLK_MQ_S_START_ON_RUN	= 4,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
	BLK_MQ_REQ_INTERNAL	= (1 << 2), /* allocate internal/sched tag */
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);

bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
			 int (reinit_request)(void *, struct request *));

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

/*
 * Driver command data is laid out immediately after the request, so subtract
 * the request size to get back to the original request and add it to get to
 * the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
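
/*
 * Illustrative sketch, with a hypothetical struct my_cmd: a driver that set
 * cmd_size = sizeof(struct my_cmd) in its tag set gets that many bytes placed
 * directly behind every request, and can reach them with
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 * or map a PDU pointer back to its request with
 *
 *	struct request *rq = blk_mq_rq_from_pdu(cmd);
 */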

/* Iterate over every hardware queue of a request queue. */
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

/* Iterate over every software context mapped to a hardware queue. */
#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#endif