/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
        RQ_END_IO_NONE,
        RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* use hctx->sched_tags */
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 8))
/* use an I/O scheduler for this request */
#define RQF_USE_SCHED		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
        (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
        MQ_RQ_IDLE		= 0,
        MQ_RQ_IN_FLIGHT		= 1,
        MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
        struct request_queue *q;
        struct blk_mq_ctx *mq_ctx;
        struct blk_mq_hw_ctx *mq_hctx;

        blk_opf_t cmd_flags;		/* op and common flags */
        req_flags_t rq_flags;

        int tag;
        int internal_tag;

        unsigned int timeout;

        /* the following two fields are internal, NEVER access directly */
        unsigned int __data_len;	/* total data len */
        sector_t __sector;		/* sector cursor */

        struct bio *bio;
        struct bio *biotail;

        union {
                struct list_head queuelist;
                struct request *rq_next;
        };

        struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
        /* Time that the first bio started allocating this request. */
        u64 alloc_time_ns;
#endif
        /* Time that this request was allocated for this IO. */
        u64 start_time_ns;
        /* Time that I/O was submitted to the device. */
        u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
        unsigned short wbt_flags;
#endif
        /*
         * rq sectors used for blk stats. It has the same value as
         * blk_rq_sectors(rq), except that it is never zeroed by
         * completion.
         */
        unsigned short stats_sectors;

        /*
         * Number of scatter-gather DMA addr+len pairs after
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct bio_crypt_ctx *crypt_ctx;
        struct blk_crypto_keyslot *crypt_keyslot;
#endif

        unsigned short ioprio;

        enum mq_rq_state state;
        atomic_t ref;

        unsigned long deadline;

        /*
         * The hash is used inside the scheduler, and killed once the
         * request reaches the dispatch list. The ipi_list is only used
         * to queue the request for softirq completion, which is long
         * after the request has been unhashed (and even removed from
         * the dispatch list).
         */
        union {
                struct hlist_node hash;	/* merge hash */
                struct llist_node ipi_list;
        };

        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. special_vec must
         * only be used if RQF_SPECIAL_PAYLOAD is set, and those requests
         * cannot be inserted into an IO scheduler.
         */
        union {
                struct rb_node rb_node;	/* sort/lookup */
                struct bio_vec special_vec;
        };

        /*
         * Three pointers are available for the IO schedulers; if they need
         * more they have to dynamically allocate it.
         */
        struct {
                struct io_cq *icq;
                void *priv[2];
        } elv;

        struct {
                unsigned int seq;
                rq_end_io_fn *saved_end_io;
        } flush;

        u64 fifo_time;

        /*
         * completion callback.
         */
        rq_end_io_fn *end_io;
        void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
        return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
        return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
        return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
        (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
        (rq)->rq_next = *(listptr);			\
        *(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
        (rq)->rq_next = NULL;				\
        **(lastpptr) = rq;				\
        *(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
        struct request *__req = NULL;			\
        if ((listptr) && *(listptr)) {			\
                __req = *(listptr);			\
                *(listptr) = __req->rq_next;		\
        }						\
        __req;						\
})

#define rq_list_peek(listptr)				\
({							\
        struct request *__req = NULL;			\
        if ((listptr) && *(listptr))			\
                __req = *(listptr);			\
        __req;						\
})

#define rq_list_for_each(listptr, pos)			\
        for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
        for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
             pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
                                struct request *rq, struct request *prev)
{
        if (prev)
                prev->rq_next = rq->rq_next;
        else
                *src = rq->rq_next;
        rq_list_add(dst, rq);
}
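
/*
 * Example (illustrative sketch, not part of the kernel API): a driver can
 * collect requests on a plain singly linked rq_list and later pop them off
 * for submission. "rq1", "rq2" and the driver-local list head below are
 * hypothetical:
 *
 *	struct request *rq_list = NULL, *rq;
 *
 *	rq_list_add(&rq_list, rq1);		// push rq1 at the head
 *	rq_list_add(&rq_list, rq2);		// rq2 now precedes rq1
 *
 *	rq_list_for_each(&rq_list, rq)
 *		pr_debug("tag %d queued\n", rq->tag);
 *
 *	while ((rq = rq_list_pop(&rq_list)))
 *		; // submit rq to hardware
 */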

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
        BLK_EH_DONE,
        BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO	0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR	1 /* allocate starting from last allocated tag */
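
/*
 * Example (illustrative sketch, not from any in-tree driver): a ->timeout
 * callback typically either asks the block layer to keep waiting or
 * completes the request itself. Names prefixed with "mydrv_" are
 * hypothetical:
 *
 *	static enum blk_eh_timer_return mydrv_timeout(struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		if (mydrv_cmd_still_in_flight(cmd))
 *			return BLK_EH_RESET_TIMER;	// give it more time
 *
 *		blk_mq_complete_request(rq);		// driver completes it
 *		return BLK_EH_DONE;
 *	}
 */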

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
        struct {
                /** @lock: Protects the dispatch list. */
                spinlock_t lock;
                /**
                 * @dispatch: Used for requests that are ready to be
                 * dispatched to the hardware but for some reason (e.g. lack of
                 * resources) could not be sent to the hardware. As soon as the
                 * driver can send new requests, requests in this list will
                 * be sent first for a fairer dispatch.
                 */
                struct list_head dispatch;
                /**
                 * @state: BLK_MQ_S_* flags. Defines the state of the hw
                 * queue (active, scheduled to restart, stopped).
                 */
                unsigned long state;
        } ____cacheline_aligned_in_smp;

        /**
         * @run_work: Used for scheduling a hardware queue run at a later time.
         */
        struct delayed_work run_work;
        /** @cpumask: Map of available CPUs where this hctx can run. */
        cpumask_var_t cpumask;
        /**
         * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
         * selection from @cpumask.
         */
        int next_cpu;
        /**
         * @next_cpu_batch: Counter of how many runs are left in the batch
         * before switching to the next CPU.
         */
        int next_cpu_batch;

        /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
        unsigned long flags;

        /**
         * @sched_data: Pointer owned by the IO scheduler attached to a request
         * queue. It's up to the IO scheduler how to use this pointer.
         */
        void *sched_data;
        /**
         * @queue: Pointer to the request queue that owns this hardware context.
         */
        struct request_queue *queue;
        /** @fq: Queue of requests that need to perform a flush operation. */
        struct blk_flush_queue *fq;

        /**
         * @driver_data: Pointer to data owned by the block driver that created
         * this hctx.
         */
        void *driver_data;

        /**
         * @ctx_map: Bitmap for each software queue. If a bit is on, there is a
         * pending request in that software queue.
         */
        struct sbitmap ctx_map;

        /**
         * @dispatch_from: Software queue to be used when no scheduler was
         * selected.
         */
        struct blk_mq_ctx *dispatch_from;
        /**
         * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
         * decide if the hw_queue is busy, using an Exponential Weighted
         * Moving Average algorithm.
         */
        unsigned int dispatch_busy;

        /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
        unsigned short type;
        /** @nr_ctx: Number of software queues. */
        unsigned short nr_ctx;
        /** @ctxs: Array of software queues. */
        struct blk_mq_ctx **ctxs;

        /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
        spinlock_t dispatch_wait_lock;
        /**
         * @dispatch_wait: Waitqueue to put requests when there is no tag
         * available at the moment, to wait for another try in the future.
         */
        wait_queue_entry_t dispatch_wait;

        /**
         * @wait_index: Index of next available dispatch_wait queue to insert
         * requests.
         */
        atomic_t wait_index;

        /**
         * @tags: Tags owned by the block driver. A tag in this set is only
         * assigned when a request is dispatched from a hardware queue.
         */
        struct blk_mq_tags *tags;
        /**
         * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
         * scheduler associated with a request queue, a tag is assigned when
         * that request is allocated. Else, this member is not used.
         */
        struct blk_mq_tags *sched_tags;

        /** @run: Number of dispatched requests. */
        unsigned long run;

        /** @numa_node: NUMA node the storage adapter has been connected to. */
        unsigned int numa_node;
        /** @queue_num: Index of this hardware queue. */
        unsigned int queue_num;

        /**
         * @nr_active: Number of active requests. Only used when a tag set is
         * shared across request queues.
         */
        atomic_t nr_active;

        /** @cpuhp_online: List to store requests if a CPU is going to die. */
        struct hlist_node cpuhp_online;
        /** @cpuhp_dead: List to store requests if some CPU dies. */
        struct hlist_node cpuhp_dead;
        /** @kobj: Kernel object for sysfs. */
        struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @debugfs_dir: debugfs directory for this hardware queue. Named
         * as cpu<cpu_number>.
         */
        struct dentry *debugfs_dir;
        /** @sched_debugfs_dir: debugfs directory for the scheduler. */
        struct dentry *sched_debugfs_dir;
#endif

        /**
         * @hctx_list: If this hctx is not in use, this is an entry in
         * q->unused_hctx_list.
         */
        struct list_head hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
        unsigned int *mq_map;
        unsigned int nr_queues;
        unsigned int queue_offset;
};
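
/*
 * Example (illustrative sketch, assuming a single default queue type): a
 * driver without special CPU affinity requirements can implement its
 * .map_queues callback with the generic helper declared later in this
 * header; "mydrv_map_queues" is a hypothetical name:
 *
 *	static void mydrv_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 *
 * blk_mq_map_queues() spreads all CPU IDs over the map's nr_queues hardware
 * queues, filling mq_map[cpu] with values in
 * [queue_offset, queue_offset + nr_queues).
 */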

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:    Just for READ I/O.
 * @HCTX_TYPE_POLL:    Polled I/O of any kind.
 * @HCTX_MAX_TYPES:    Number of types of hctx.
 */
enum hctx_type {
        HCTX_TYPE_DEFAULT,
        HCTX_TYPE_READ,
        HCTX_TYPE_POLL,

        HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Used as a lock when the type of the request queue is
 *		   blocking (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
        const struct blk_mq_ops *ops;
        struct blk_mq_queue_map map[HCTX_MAX_TYPES];
        unsigned int nr_maps;
        unsigned int nr_hw_queues;
        unsigned int queue_depth;
        unsigned int reserved_tags;
        unsigned int cmd_size;
        int numa_node;
        unsigned int timeout;
        unsigned int flags;
        void *driver_data;

        struct blk_mq_tags **tags;

        struct blk_mq_tags *shared_tags;

        struct mutex tag_list_lock;
        struct list_head tag_list;
        struct srcu_struct *srcu;
};
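
/*
 * Example (illustrative sketch, assuming a simple single-queue driver):
 * typical tag set initialization followed by disk allocation. Error
 * handling is elided; "mydrv", "mydrv_cmd" and "mydrv_mq_ops" are
 * hypothetical:
 *
 *	struct blk_mq_tag_set *set = &mydrv->tag_set;
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &mydrv_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->nr_maps = 1;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	set->cmd_size = sizeof(struct mydrv_cmd);	// per-request PDU
 *
 *	if (blk_mq_alloc_tag_set(set))
 *		goto out_err;
 *	disk = blk_mq_alloc_disk(set, mydrv);		// queuedata = mydrv
 */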

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
        struct request *rq;
        bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
        /**
         * @queue_rq: Queue a new request from block IO.
         */
        blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
                                 const struct blk_mq_queue_data *);

        /**
         * @commit_rqs: If a driver uses bd->last to judge when to submit
         * requests to hardware, it must define this function. In case of errors
         * that make us stop issuing further requests, this hook serves the
         * purpose of kicking the hardware (which the last request otherwise
         * would have done).
         */
        void (*commit_rqs)(struct blk_mq_hw_ctx *);

        /**
         * @queue_rqs: Queue a list of new requests. The driver is guaranteed
         * that each request belongs to the same queue. If the driver doesn't
         * empty the @rqlist completely, then the rest will be queued
         * individually by the block layer upon return.
         */
        void (*queue_rqs)(struct request **rqlist);

        /**
         * @get_budget: Reserve a budget before queueing a request. Once
         * .queue_rq is run, it is the driver's responsibility to release the
         * reserved budget. The failure case of .get_budget also has to be
         * handled to avoid I/O deadlock.
         */
        int (*get_budget)(struct request_queue *);

        /**
         * @put_budget: Release the reserved budget.
         */
        void (*put_budget)(struct request_queue *, int);

        /**
         * @set_rq_budget_token: Store rq's budget token.
         */
        void (*set_rq_budget_token)(struct request *, int);
        /**
         * @get_rq_budget_token: Retrieve rq's budget token.
         */
        int (*get_rq_budget_token)(struct request *);

        /**
         * @timeout: Called on request timeout.
         */
        enum blk_eh_timer_return (*timeout)(struct request *);

        /**
         * @poll: Called to poll for completion of a specific tag.
         */
        int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

        /**
         * @complete: Mark the request as complete.
         */
        void (*complete)(struct request *);

        /**
         * @init_hctx: Called when the block layer side of a hardware queue has
         * been set up, allowing the driver to allocate/init matching
         * structures.
         */
        int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
        /**
         * @exit_hctx: Ditto for exit/teardown.
         */
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

        /**
         * @init_request: Called for every command allocated by the block layer
         * to allow the driver to set up driver specific data.
         *
         * A tag greater than or equal to queue_depth is for setting up the
         * flush request.
         */
        int (*init_request)(struct blk_mq_tag_set *set, struct request *,
                            unsigned int, unsigned int);
        /**
         * @exit_request: Ditto for exit/teardown.
         */
        void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
                             unsigned int);

        /**
         * @cleanup_rq: Called before freeing a request that has not completed
         * yet, usually to free driver-private data.
         */
        void (*cleanup_rq)(struct request *);

        /**
         * @busy: If set, returns whether or not this queue currently is busy.
         */
        bool (*busy)(struct request_queue *);

        /**
         * @map_queues: This allows drivers to specify their own queue mapping
         * by overriding the setup-time function that builds the mq_map.
         */
        void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @show_rq: Used by the debugfs implementation to show driver-specific
         * information about a request.
         */
        void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
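
/*
 * Example (illustrative sketch): the shape of a minimal ->queue_rq
 * implementation. Everything prefixed "mydrv_" is hypothetical; a real
 * driver also has to handle busy hardware (BLK_STS_RESOURCE) and honour
 * bd->last as described for @commit_rqs above:
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_start_request(rq);
 *		if (mydrv_submit(hctx->driver_data, cmd))
 *			return BLK_STS_RESOURCE;	// retried later
 *		if (bd->last)
 *			mydrv_ring_doorbell(hctx->driver_data);
 *		return BLK_STS_OK;
 *	}
 */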

enum {
        BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
        BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
        /*
         * Set when this device requires an underlying blk-mq device for
         * completing IO.
         */
        BLK_MQ_F_STACKING	= 1 << 2,
        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
        BLK_MQ_F_BLOCKING	= 1 << 5,
        /* Do not allow an I/O scheduler to be configured. */
        BLK_MQ_F_NO_SCHED	= 1 << 6,
        /*
         * Select 'none' instead of 'mq-deadline' during queue registration
         * in case of a single hwq or shared hwqs.
         */
        BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,

        BLK_MQ_S_STOPPED	= 0,
        BLK_MQ_S_TAG_ACTIVE	= 1,
        BLK_MQ_S_SCHED_RESTART	= 2,

        /* hw queue is inactive after all its CPUs become offline */
        BLK_MQ_S_INACTIVE	= 3,

        BLK_MQ_MAX_DEPTH	= 10240,

        BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
        ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
                ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
                struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)			\
({								\
        static struct lock_class_key __key;			\
								\
        __blk_mq_alloc_disk(set, queuedata, &__key);		\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
                struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int queue_depth,
                unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
                unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
        /* return when out of requests */
        BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
        /* allocate from reserved pool */
        BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
        /* set RQF_PM */
        BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
                blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
                blk_opf_t opf, blk_mq_req_flags_t flags,
                unsigned int hctx_idx);

/*
 * Tag address space map.
 */
struct blk_mq_tags {
        unsigned int nr_tags;
        unsigned int nr_reserved_tags;
        unsigned int active_queues;

        struct sbitmap_queue bitmap_tags;
        struct sbitmap_queue breserved_tags;

        struct request **rqs;
        struct request **static_rqs;
        struct list_head page_list;

        /*
         * Used to clear the request reference in rqs[] before freeing one
         * request pool.
         */
        spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
                                               unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}

enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
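
/*
 * Example (illustrative sketch): blk_mq_unique_tag() packs the hardware
 * queue index into the upper 16 bits and the per-queue tag into the lower
 * 16 bits, so both can be recovered from a single value:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// hctx queue_num
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	// per-queue tag
 */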

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
        return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context rather than in
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
                void (*complete)(struct request *rq))
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
        complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or are using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
        return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
        return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
                                       struct io_comp_batch *iob, int ioerror,
                                       void (*complete)(struct io_comp_batch *))
{
        /*
         * blk_mq_end_request_batch() can't end requests allocated from
         * sched tags.
         */
        if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
            (req->end_io && !blk_rq_is_passthrough(req)))
                return false;

        if (!iob->complete)
                iob->complete = complete;
        else if (iob->complete != complete)
                return false;
        iob->need_ts |= blk_mq_need_time_stamp(req);
        rq_list_add(&iob->req_list, req);
        return true;
}
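
/*
 * Example (illustrative sketch): a driver's completion path can try to
 * batch a completion and fall back to the one-at-a-time path when batching
 * is not possible. "mydrv_complete_batch" is a hypothetical io_comp_batch
 * callback:
 *
 *	if (!blk_mq_add_to_batch(req, iob, error, mydrv_complete_batch))
 *		blk_mq_complete_request(req);	// non-batched fallback
 *
 * The batch is later finished in one go, e.g. at the end of a ->poll run,
 * by calling blk_mq_end_request_batch(iob).
 */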

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
        if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
            test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
                return __blk_should_fake_timeout(q);
        return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return rq + 1;
}
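
/*
 * Example (illustrative sketch): with cmd_size set in the tag set, the
 * driver PDU lives directly behind the request, so the two conversions are
 * simple pointer arithmetic. "mydrv_cmd" is a hypothetical PDU type:
 *
 *	struct mydrv_cmd {
 *		u16 hw_slot;
 *	};
 *
 *	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);	// == rq
 */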

#define queue_for_each_hw_ctx(q, hctx, i)			\
        xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)				\
        for ((i) = 0; (i) < (hctx)->nr_ctx &&			\
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
        if (rq->q->mq_ops->cleanup_rq)
                rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
                unsigned int nr_segs)
{
        rq->nr_phys_segments = nr_segs;
        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
        rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
        return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                struct bio_set *bs, gfp_t gfp_mask,
                int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
        struct page **pages;
        unsigned long offset;
        unsigned short page_order;
        unsigned short nr_entries;
        bool null_mapped;
        bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
                struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
                void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
                struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
                unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
        struct bvec_iter iter;
        struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)			\
        if ((rq->bio))					\
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
        __rq_for_each_bio(_iter.bio, _rq)			\
                bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
        __rq_for_each_bio(_iter.bio, _rq)			\
                bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
                (_iter.bio->bi_next == NULL &&			\
                 bio_iter_last(bvec, _iter.iter))

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
        return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
        if (!rq->bio)
                return 0;
        if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
                return rq->bio->bi_iter.bi_size;
        return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
        return rq->stats_sectors;
}
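
/*
 * Example (illustrative sketch): walking a request's data with the
 * accessors above, e.g. in a ramdisk-style driver that copies one segment
 * at a time. "mydrv_copy" is a hypothetical helper:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	sector_t sector = blk_rq_pos(rq);
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		mydrv_copy(sector,
 *			   page_address(bvec.bv_page) + bvec.bv_offset,
 *			   bvec.bv_len);
 *		sector += bvec.bv_len >> SECTOR_SHIFT;
 *	}
 */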

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec.bv_len;
        return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request. The caller needs to check
 * that there is at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec;
        return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
        unsigned int nr_bios = 0;
        struct bio *bio;

        __rq_for_each_bio(bio, rq)
                nr_bios++;

        return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
                        unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return 1;
        return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
        return max_t(unsigned short, rq->nr_phys_segments, 1);
}
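
/*
 * Example (illustrative sketch): since each discard bio merged into a
 * request counts as one segment, a driver can fill one hardware range
 * descriptor per bio. "range" is a hypothetical driver-defined array of
 * blk_rq_nr_discard_segments(rq) descriptors:
 *
 *	struct bio *bio;
 *	unsigned int n = 0;
 *
 *	__rq_for_each_bio(bio, rq) {
 *		range[n].sector = bio->bi_iter.bi_sector;
 *		range[n].num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
 *		n++;
 *	}
 */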

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist)
{
        struct scatterlist *last_sg = NULL;

        return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
        return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
        return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

/**
 * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
 * @rq: Request to examine.
 *
 * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
 */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
        return op_needs_zoned_write_locking(req_op(rq)) &&
                blk_rq_zone_is_seq(rq);
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
        if (blk_req_needs_zone_write_lock(rq))
                __blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
        if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
                __blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return rq->q->disk->seq_zones_wlock &&
                test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        if (!blk_req_needs_zone_write_lock(rq))
                return true;
        return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
        return false;
}

static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
        return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

#endif /* BLK_MQ_H */