/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
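/*
 * Illustrative sketch (not part of the API): a driver-provided rq_end_io_fn
 * for a request submitted with blk_execute_rq_nowait().  Returning
 * RQ_END_IO_FREE asks the block layer to free the request; RQ_END_IO_NONE
 * means the callback (or the original submitter) keeps ownership.  The
 * "my_io" structure below is hypothetical.
 *
 *	struct my_io {
 *		struct completion done;
 *		blk_status_t status;
 *	};
 *
 *	static enum rq_end_io_ret my_end_io(struct request *rq,
 *					    blk_status_t error)
 *	{
 *		struct my_io *io = rq->end_io_data;
 *
 *		io->status = error;
 *		complete(&io->done);
 *		return RQ_END_IO_NONE;	// submitter frees the request
 *	}
 */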

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* queue has elevator attached */
#define RQF_ELV			((__force req_flags_t)(1 << 22))
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;		/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;		/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(req_op(rq));
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}
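/*
 * Usage sketch (illustrative only): an rq_list is a singly linked, LIFO-push
 * list of requests threaded through rq->rq_next and represented by a plain
 * struct request * head.  The callers of "inspect", "dispatch" and "some_rq"
 * below are hypothetical.
 *
 *	struct request *rqlist = NULL;		// empty list
 *	struct request *rq;
 *
 *	rq_list_add(&rqlist, some_rq);		// push at the head
 *
 *	rq_list_for_each(&rqlist, rq)		// walk without modifying
 *		inspect(rq);
 *
 *	while ((rq = rq_list_pop(&rqlist)))	// drain until empty
 *		dispatch(rq);
 */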

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};
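/*
 * Illustrative sketch of a blk_mq_ops->timeout handler using these return
 * values.  The device-state check and the per-request error bookkeeping are
 * hypothetical: keep waiting if the command is still outstanding in hardware,
 * otherwise record a failure in driver-private data and finish the command.
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq)
 *	{
 *		if (my_device_still_busy(rq))		// hypothetical check
 *			return BLK_EH_RESET_TIMER;	// re-arm and wait
 *
 *		// mark the command failed in the driver's PDU (not shown)
 *		blk_mq_complete_request(rq);
 *		return BLK_EH_DONE;
 *	}
 */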

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many queue runs are left in the
	 * batch before switching to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of dispatched requests. */
	unsigned long		run;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store requests if a CPU is going to die. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store requests if a CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};
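/*
 * Illustrative sketch (not kernel code): a trivial blk_mq_ops->map_queues
 * style fill of one blk_mq_queue_map, spreading CPUs round-robin over the
 * hardware queues of that map.  Real drivers normally just call
 * blk_mq_map_queues() (declared below) or a bus-specific helper instead;
 * "my_map_queues" is a hypothetical name.
 *
 *	static void my_map_queues(struct blk_mq_queue_map *qmap)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			qmap->mq_map[cpu] = qmap->queue_offset +
 *					    cpu % qmap->nr_queues;
 *	}
 */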

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);
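/*
 * Usage sketch (illustrative): a busy_tag_iter_fn callback passed to
 * blk_mq_tagset_busy_iter() (declared below), e.g. to cancel requests still
 * held by the driver during teardown.  Returning true continues the
 * iteration; how the request is actually failed is driver-specific and only
 * hinted at here.
 *
 *	static bool my_cancel_rq(struct request *rq, void *data)
 *	{
 *		// record a failure in driver-private data (not shown),
 *		// then hand the request back to the block layer
 *		blk_mq_complete_request(rq);
 *		return true;			// keep iterating
 *	}
 *
 *	// blk_mq_tagset_busy_iter(&dev->tag_set, my_cancel_rq, NULL);
 */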

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * .queue_rq has run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget also has to be
	 * handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
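/*
 * Illustrative sketch of a minimal blk_mq_ops implementation.  Only
 * ->queue_rq is mandatory; everything named "my_*" below is hypothetical and
 * the hardware submission step is elided.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	// per-rq data
 *
 *		blk_mq_start_request(rq);
 *		if (my_submit_to_hw(cmd))		// hypothetical
 *			return BLK_STS_RESOURCE;	// retried later
 *		return BLK_STS_OK;
 *	}
 *
 *	// Completion, typically from the driver's IRQ handler:
 *	//	blk_mq_complete_request(rq);
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.timeout	= my_timeout,		// optional
 *	};
 */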

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires an underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, queuedata, &__key);			\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
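/*
 * Illustrative bring-up sketch for a simple single-queue driver: fill in a
 * blk_mq_tag_set, allocate it, then allocate a gendisk bound to it.  The
 * "dev", "my_*" names and error handling are hypothetical; real drivers also
 * set queue limits and call device_add_disk() afterwards.
 *
 *	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);	// per-request PDU
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	dev->tag_set.driver_data = dev;
 *
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		goto out;
 *
 *	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		goto out;
 *	}
 */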

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;

	atomic_t active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	if (!iob || (req->rq_flags & RQF_ELV) || ioerror)
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}
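/*
 * Usage sketch (illustrative): batching completions from a driver's
 * blk_mq_ops->poll handler.  Requests that cannot be batched (I/O error,
 * special ->end_io) fall back to an individual completion; the batch itself
 * is flushed by the caller via blk_mq_end_request_batch().  All "my_*"
 * names are hypothetical.
 *
 *	static int my_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 *	{
 *		struct request *rq;
 *		int found = 0;
 *
 *		while ((rq = my_next_completed_rq(hctx))) {	// hypothetical
 *			found++;
 *			if (!blk_mq_add_to_batch(rq, iob, 0, my_complete_batch))
 *				blk_mq_complete_request(rq);
 *		}
 *		return found;
 *	}
 */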

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
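/*
 * Memory layout sketch: when blk_mq_tag_set.cmd_size is non-zero, each
 * request is followed by cmd_size bytes of driver-private data (the "PDU"),
 * so the two helpers above are simple pointer arithmetic.  The struct name
 * below is hypothetical.
 *
 *	[ struct request ][ cmd_size bytes of driver PDU ]
 *	^rq               ^blk_mq_rq_to_pdu(rq)
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	// rq2 == rq
 */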

#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
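/*
 * Usage sketch (illustrative): walking all data of a request segment by
 * segment, e.g. in a driver that programs one DMA descriptor per bio_vec.
 * The descriptor-setup call is hypothetical.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		// bvec.bv_page / bv_offset / bv_len describe one segment
 *		my_setup_descriptor(dev, &bvec, rq_iter_last(bvec, iter));
 *	}
 */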

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, and the driver may have to add its own
 * special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
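/*
 * Usage sketch (illustrative): mapping a request to a scatterlist in
 * ->queue_rq before handing it to DMA.  The scatterlist size is bounded by
 * blk_rq_nr_phys_segments(); "my_cmd" and the DMA step are hypothetical.
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	int nents;
 *
 *	sg_init_table(cmd->sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, cmd->sgl);
 *	if (nents <= 0)
 *		return BLK_STS_IOERR;
 *	// dma_map_sg(dev, cmd->sgl, nents, rq_dma_dir(rq)); ...
 */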

void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

#endif /* BLK_MQ_H */