xref: /linux-6.15/include/linux/blk-mq.h (revision fc4eb486)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef BLK_MQ_H
3 #define BLK_MQ_H
4 
5 #include <linux/blkdev.h>
6 #include <linux/sbitmap.h>
7 #include <linux/lockdep.h>
8 #include <linux/scatterlist.h>
9 #include <linux/prefetch.h>
10 
11 struct blk_mq_tags;
12 struct blk_flush_queue;
13 
14 #define BLKDEV_MIN_RQ	4
15 #define BLKDEV_DEFAULT_RQ	128
16 
17 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
18 
19 /*
20  * request flags */
21 typedef __u32 __bitwise req_flags_t;
22 
23 /* drive already may have started this one */
24 #define RQF_STARTED		((__force req_flags_t)(1 << 1))
25 /* may not be passed by ioscheduler */
26 #define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
27 /* request for flush sequence */
28 #define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
29 /* merge of different types, fail separately */
30 #define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
31 /* track inflight for MQ */
32 #define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
33 /* don't call prep for this one */
34 #define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
35 /* vaguely specified driver internal error.  Ignored by the block layer */
36 #define RQF_FAILED		((__force req_flags_t)(1 << 10))
37 /* don't warn about errors */
38 #define RQF_QUIET		((__force req_flags_t)(1 << 11))
39 /* elevator private data attached */
40 #define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
41 /* account into disk and partition IO statistics */
42 #define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
43 /* runtime pm request */
44 #define RQF_PM			((__force req_flags_t)(1 << 15))
45 /* on IO scheduler merge hash */
46 #define RQF_HASHED		((__force req_flags_t)(1 << 16))
47 /* track IO completion time */
48 #define RQF_STATS		((__force req_flags_t)(1 << 17))
49 /* Look at ->special_vec for the actual data payload instead of the
50    bio chain. */
51 #define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
52 /* The per-zone write lock is held for this request */
53 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
54 /* already slept for hybrid poll */
55 #define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
56 /* ->timeout has been called, don't expire again */
57 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
58 /* queue has elevator attached */
59 #define RQF_ELV			((__force req_flags_t)(1 << 22))
60 
61 /* flags that prevent us from merging requests: */
62 #define RQF_NOMERGE_FLAGS \
63 	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
64 
65 enum mq_rq_state {
66 	MQ_RQ_IDLE		= 0,
67 	MQ_RQ_IN_FLIGHT		= 1,
68 	MQ_RQ_COMPLETE		= 2,
69 };
70 
71 /*
72  * Try to put the fields that are referenced together in the same cacheline.
73  *
74  * If you modify this structure, make sure to update blk_rq_init() and
75  * especially blk_mq_rq_ctx_init() to take care of the added fields.
76  */
77 struct request {
78 	struct request_queue *q;
79 	struct blk_mq_ctx *mq_ctx;
80 	struct blk_mq_hw_ctx *mq_hctx;
81 
82 	unsigned int cmd_flags;		/* op and common flags */
83 	req_flags_t rq_flags;
84 
85 	int tag;
86 	int internal_tag;
87 
88 	unsigned int timeout;
89 
90 	/* the following two fields are internal, NEVER access directly */
91 	unsigned int __data_len;	/* total data len */
92 	sector_t __sector;		/* sector cursor */
93 
94 	struct bio *bio;
95 	struct bio *biotail;
96 
97 	union {
98 		struct list_head queuelist;
99 		struct request *rq_next;
100 	};
101 
102 	struct block_device *part;
103 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
104 	/* Time that the first bio started allocating this request. */
105 	u64 alloc_time_ns;
106 #endif
107 	/* Time that this request was allocated for this IO. */
108 	u64 start_time_ns;
109 	/* Time that I/O was submitted to the device. */
110 	u64 io_start_time_ns;
111 
112 #ifdef CONFIG_BLK_WBT
113 	unsigned short wbt_flags;
114 #endif
115 	/*
116 	 * rq sectors used for blk stats. It has the same value
117 	 * as blk_rq_sectors(rq), except that it is never zeroed
118 	 * by completion.
119 	 */
120 	unsigned short stats_sectors;
121 
122 	/*
123 	 * Number of scatter-gather DMA addr+len pairs after
124 	 * physical address coalescing is performed.
125 	 */
126 	unsigned short nr_phys_segments;
127 
128 #ifdef CONFIG_BLK_DEV_INTEGRITY
129 	unsigned short nr_integrity_segments;
130 #endif
131 
132 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
133 	struct bio_crypt_ctx *crypt_ctx;
134 	struct blk_crypto_keyslot *crypt_keyslot;
135 #endif
136 
137 	unsigned short write_hint;
138 	unsigned short ioprio;
139 
140 	enum mq_rq_state state;
141 	atomic_t ref;
142 
143 	unsigned long deadline;
144 
145 	/*
146 	 * The hash is used inside the scheduler, and killed once the
147 	 * request reaches the dispatch list. The ipi_list is only used
148 	 * to queue the request for softirq completion, which is long
149 	 * after the request has been unhashed (and even removed from
150 	 * the dispatch list).
151 	 */
152 	union {
153 		struct hlist_node hash;	/* merge hash */
154 		struct llist_node ipi_list;
155 	};
156 
157 	/*
158 	 * The rb_node is only used inside the io scheduler, requests
159 	 * are pruned when moved to the dispatch queue. So let the
160 	 * completion_data share space with the rb_node.
161 	 */
162 	union {
163 		struct rb_node rb_node;	/* sort/lookup */
164 		struct bio_vec special_vec;
165 		void *completion_data;
166 		int error_count; /* for legacy drivers, don't use */
167 	};
168 
169 
170 	/*
171 	 * Three pointers are available for the IO schedulers; if they need
172 	 * more, they have to allocate them dynamically.  Flush requests are
173 	 * never put on the IO scheduler. So let the flush fields share
174 	 * space with the elevator data.
175 	 */
176 	union {
177 		struct {
178 			struct io_cq		*icq;
179 			void			*priv[2];
180 		} elv;
181 
182 		struct {
183 			unsigned int		seq;
184 			struct list_head	list;
185 			rq_end_io_fn		*saved_end_io;
186 		} flush;
187 	};
188 
189 	union {
190 		struct __call_single_data csd;
191 		u64 fifo_time;
192 	};
193 
194 	/*
195 	 * completion callback.
196 	 */
197 	rq_end_io_fn *end_io;
198 	void *end_io_data;
199 };
200 
201 #define req_op(req) \
202 	((req)->cmd_flags & REQ_OP_MASK)
203 
204 static inline bool blk_rq_is_passthrough(struct request *rq)
205 {
206 	return blk_op_is_passthrough(req_op(rq));
207 }
208 
209 static inline unsigned short req_get_ioprio(struct request *req)
210 {
211 	return req->ioprio;
212 }
213 
214 #define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
215 
216 #define rq_dma_dir(rq) \
217 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
218 
219 #define rq_list_add(listptr, rq)	do {		\
220 	(rq)->rq_next = *(listptr);			\
221 	*(listptr) = rq;				\
222 } while (0)
223 
224 #define rq_list_pop(listptr)				\
225 ({							\
226 	struct request *__req = NULL;			\
227 	if ((listptr) && *(listptr))	{		\
228 		__req = *(listptr);			\
229 		*(listptr) = __req->rq_next;		\
230 	}						\
231 	__req;						\
232 })
233 
234 #define rq_list_peek(listptr)				\
235 ({							\
236 	struct request *__req = NULL;			\
237 	if ((listptr) && *(listptr))			\
238 		__req = *(listptr);			\
239 	__req;						\
240 })
241 
242 #define rq_list_for_each(listptr, pos)			\
243 	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
244 
245 #define rq_list_for_each_safe(listptr, pos, nxt)			\
246 	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
247 		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
248 
249 #define rq_list_next(rq)	(rq)->rq_next
250 #define rq_list_empty(list)	((list) == (struct request *) NULL)
251 
252 /**
253  * rq_list_move() - move a struct request from one list to another
254  * @src: The source list @rq is currently in
255  * @dst: The destination list that @rq will be appended to
256  * @rq: The request to move
257  * @prev: The request preceding @rq in @src (NULL if @rq is the head)
258  */
259 static inline void rq_list_move(struct request **src, struct request **dst,
260 				struct request *rq, struct request *prev)
261 {
262 	if (prev)
263 		prev->rq_next = rq->rq_next;
264 	else
265 		*src = rq->rq_next;
266 	rq_list_add(dst, rq);
267 }
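
/*
 * Example (illustrative sketch, not part of this API): building and draining
 * an rq_list with the helpers above.  submit_to_hw() is a hypothetical
 * driver function.
 *
 *	static void drain_list(struct request **rqlist)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = rq_list_pop(rqlist)))
 *			submit_to_hw(rq);
 *	}
 *
 * rq_list_add() pushes at the head, so requests pop in LIFO order relative
 * to the order they were added.
 */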
268 
269 enum blk_eh_timer_return {
270 	BLK_EH_DONE,		/* driver has completed the command */
271 	BLK_EH_RESET_TIMER,	/* reset timer and try again */
272 };
273 
274 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
275 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
276 
277 /**
278  * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
279  * block device
280  */
281 struct blk_mq_hw_ctx {
282 	struct {
283 		/** @lock: Protects the dispatch list. */
284 		spinlock_t		lock;
285 		/**
286 		 * @dispatch: Used for requests that are ready to be
287 		 * dispatched to the hardware but for some reason (e.g. lack of
288 		 * resources) could not be sent to the hardware. As soon as the
289 		 * driver can send new requests, requests on this list will
290 		 * be sent first for a fairer dispatch.
291 		 */
292 		struct list_head	dispatch;
293 		 /**
294 		  * @state: BLK_MQ_S_* flags. Defines the state of the hw
295 		  * queue (active, scheduled to restart, stopped).
296 		  */
297 		unsigned long		state;
298 	} ____cacheline_aligned_in_smp;
299 
300 	/**
301 	 * @run_work: Used for scheduling a hardware queue run at a later time.
302 	 */
303 	struct delayed_work	run_work;
304 	/** @cpumask: Map of available CPUs where this hctx can run. */
305 	cpumask_var_t		cpumask;
306 	/**
307 	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
308 	 * selection from @cpumask.
309 	 */
310 	int			next_cpu;
311 	/**
312 	 * @next_cpu_batch: Counter of how many works are left in the batch before
313 	 * changing to the next CPU.
314 	 */
315 	int			next_cpu_batch;
316 
317 	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
318 	unsigned long		flags;
319 
320 	/**
321 	 * @sched_data: Pointer owned by the IO scheduler attached to a request
322 	 * queue. It's up to the IO scheduler how to use this pointer.
323 	 */
324 	void			*sched_data;
325 	/**
326 	 * @queue: Pointer to the request queue that owns this hardware context.
327 	 */
328 	struct request_queue	*queue;
329 	/** @fq: Queue of requests that need to perform a flush operation. */
330 	struct blk_flush_queue	*fq;
331 
332 	/**
333 	 * @driver_data: Pointer to data owned by the block driver that created
334 	 * this hctx
335 	 */
336 	void			*driver_data;
337 
338 	/**
339 	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
340 	 * pending request in that software queue.
341 	 */
342 	struct sbitmap		ctx_map;
343 
344 	/**
345 	 * @dispatch_from: Software queue to be used when no scheduler was
346 	 * selected.
347 	 */
348 	struct blk_mq_ctx	*dispatch_from;
349 	/**
350 	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
351 	 * decide if the hw_queue is busy using an Exponential Weighted Moving
352 	 * Average algorithm.
353 	 */
354 	unsigned int		dispatch_busy;
355 
356 	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
357 	unsigned short		type;
358 	/** @nr_ctx: Number of software queues. */
359 	unsigned short		nr_ctx;
360 	/** @ctxs: Array of software queues. */
361 	struct blk_mq_ctx	**ctxs;
362 
363 	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
364 	spinlock_t		dispatch_wait_lock;
365 	/**
366 	 * @dispatch_wait: Waitqueue to put requests when there is no tag
367 	 * available at the moment, to wait for another try in the future.
368 	 */
369 	wait_queue_entry_t	dispatch_wait;
370 
371 	/**
372 	 * @wait_index: Index of next available dispatch_wait queue to insert
373 	 * requests.
374 	 */
375 	atomic_t		wait_index;
376 
377 	/**
378 	 * @tags: Tags owned by the block driver. A tag at this set is only
379 	 * @tags: Tags owned by the block driver. A tag in this set is only
380 	 */
381 	struct blk_mq_tags	*tags;
382 	/**
383 	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
384 	 * scheduler associated with a request queue, a tag is assigned when
385 	 * that request is allocated. Else, this member is not used.
386 	 */
387 	struct blk_mq_tags	*sched_tags;
388 
389 	/** @queued: Number of queued requests. */
390 	unsigned long		queued;
391 	/** @run: Number of dispatched requests. */
392 	unsigned long		run;
393 
394 	/** @numa_node: NUMA node the storage adapter has been connected to. */
395 	unsigned int		numa_node;
396 	/** @queue_num: Index of this hardware queue. */
397 	unsigned int		queue_num;
398 
399 	/**
400 	 * @nr_active: Number of active requests. Only used when a tag set is
401 	 * shared across request queues.
402 	 */
403 	atomic_t		nr_active;
404 
405 	/** @cpuhp_online: List to store requests if a CPU is going to die. */
406 	struct hlist_node	cpuhp_online;
407 	/** @cpuhp_dead: List to store requests if some CPU dies. */
408 	struct hlist_node	cpuhp_dead;
409 	/** @kobj: Kernel object for sysfs. */
410 	struct kobject		kobj;
411 
412 #ifdef CONFIG_BLK_DEBUG_FS
413 	/**
414 	 * @debugfs_dir: debugfs directory for this hardware queue. Named
415 	 * as hctx<queue_num>.
416 	 */
417 	struct dentry		*debugfs_dir;
418 	/** @sched_debugfs_dir:	debugfs directory for the scheduler. */
419 	struct dentry		*sched_debugfs_dir;
420 #endif
421 
422 	/**
423 	 * @hctx_list: if this hctx is not in use, this is an entry in
424 	 * q->unused_hctx_list.
425 	 */
426 	struct list_head	hctx_list;
427 };
428 
429 /**
430  * struct blk_mq_queue_map - Map software queues to hardware queues
431  * @mq_map:       CPU ID to hardware queue index map. This is an array
432  *	with nr_cpu_ids elements. Each element has a value in the range
433  *	[@queue_offset, @queue_offset + @nr_queues).
434  * @nr_queues:    Number of hardware queues to map CPU IDs onto.
435  * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
436  *	driver to map each hardware queue type (enum hctx_type) onto a distinct
437  *	set of hardware queues.
438  */
439 struct blk_mq_queue_map {
440 	unsigned int *mq_map;
441 	unsigned int nr_queues;
442 	unsigned int queue_offset;
443 };
444 
445 /**
446  * enum hctx_type - Type of hardware queue
447  * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
448  * @HCTX_TYPE_READ:	Just for READ I/O.
449  * @HCTX_TYPE_POLL:	Polled I/O of any kind.
450  * @HCTX_MAX_TYPES:	Number of types of hctx.
451  */
452 enum hctx_type {
453 	HCTX_TYPE_DEFAULT,
454 	HCTX_TYPE_READ,
455 	HCTX_TYPE_POLL,
456 
457 	HCTX_MAX_TYPES,
458 };
459 
460 /**
461  * struct blk_mq_tag_set - tag set that can be shared between request queues
462  * @map:	   One or more ctx -> hctx mappings. One map exists for each
463  *		   hardware queue type (enum hctx_type) that the driver wishes
464  *		   to support. There are no restrictions on maps being of the
465  *		   same size, and it's perfectly legal to share maps between
466  *		   types.
467  * @nr_maps:	   Number of elements in the @map array. A number in the range
468  *		   [1, HCTX_MAX_TYPES].
469  * @ops:	   Pointers to functions that implement block driver behavior.
470  * @nr_hw_queues:  Number of hardware queues supported by the block driver that
471  *		   owns this data structure.
472  * @queue_depth:   Number of tags per hardware queue, reserved tags included.
473  * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
474  *		   allocations.
475  * @cmd_size:	   Number of additional bytes to allocate per request. The block
476  *		   driver owns these additional bytes.
477  * @numa_node:	   NUMA node the storage adapter has been connected to.
478  * @timeout:	   Request processing timeout in jiffies.
479  * @flags:	   Zero or more BLK_MQ_F_* flags.
480  * @driver_data:   Pointer to data owned by the block driver that created this
481  *		   tag set.
482  * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
483  *		   elements.
484  * @shared_tags:
485  *		   Shared set of tags. Has @nr_hw_queues elements. If set,
486  *		   shared by all @tags.
487  * @tag_list_lock: Serializes tag_list accesses.
488  * @tag_list:	   List of the request queues that use this tag set. See also
489  *		   request_queue.tag_set_list.
490  */
491 struct blk_mq_tag_set {
492 	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
493 	unsigned int		nr_maps;
494 	const struct blk_mq_ops	*ops;
495 	unsigned int		nr_hw_queues;
496 	unsigned int		queue_depth;
497 	unsigned int		reserved_tags;
498 	unsigned int		cmd_size;
499 	int			numa_node;
500 	unsigned int		timeout;
501 	unsigned int		flags;
502 	void			*driver_data;
503 
504 	struct blk_mq_tags	**tags;
505 
506 	struct blk_mq_tags	*shared_tags;
507 
508 	struct mutex		tag_list_lock;
509 	struct list_head	tag_list;
510 };
511 
512 /**
513  * struct blk_mq_queue_data - Data about a request inserted in a queue
514  *
515  * @rq:   Request pointer.
516  * @last: If it is the last request in the queue.
517  */
518 struct blk_mq_queue_data {
519 	struct request *rq;
520 	bool last;
521 };
522 
523 typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
524 
525 /**
526  * struct blk_mq_ops - Callback functions that implement block driver
527  * behaviour.
528  */
529 struct blk_mq_ops {
530 	/**
531 	 * @queue_rq: Queue a new request from block IO.
532 	 */
533 	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
534 				 const struct blk_mq_queue_data *);
535 
536 	/**
537 	 * @commit_rqs: If a driver uses bd->last to judge when to submit
538 	 * requests to hardware, it must define this function. In case of errors
539 	 * that make us stop issuing further requests, this hook serves the
540 	 * purpose of kicking the hardware (which the last request otherwise
541 	 * would have done).
542 	 */
543 	void (*commit_rqs)(struct blk_mq_hw_ctx *);
544 
545 	/**
546 	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
547 	 * that each request belongs to the same queue. If the driver doesn't
548 	 * empty the @rqlist completely, then the rest will be queued
549 	 * individually by the block layer upon return.
550 	 */
551 	void (*queue_rqs)(struct request **rqlist);
552 
553 	/**
554 	 * @get_budget: Reserve a budget before queueing a request. Once
555 	 * .queue_rq is run, it is the driver's responsibility to release the
556 	 * reserved budget. The failure case of .get_budget must also be
557 	 * handled to avoid I/O deadlock.
558 	 */
559 	int (*get_budget)(struct request_queue *);
560 
561 	/**
562 	 * @put_budget: Release the reserved budget.
563 	 */
564 	void (*put_budget)(struct request_queue *, int);
565 
566 	/**
567 	 * @set_rq_budget_token: store rq's budget token
568 	 */
569 	void (*set_rq_budget_token)(struct request *, int);
570 	/**
571 	 * @get_rq_budget_token: retrieve rq's budget token
572 	 */
573 	int (*get_rq_budget_token)(struct request *);
574 
575 	/**
576 	 * @timeout: Called on request timeout.
577 	 */
578 	enum blk_eh_timer_return (*timeout)(struct request *, bool);
579 
580 	/**
581 	 * @poll: Called to poll for completion of a specific tag.
582 	 */
583 	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
584 
585 	/**
586 	 * @complete: Mark the request as complete.
587 	 */
588 	void (*complete)(struct request *);
589 
590 	/**
591 	 * @init_hctx: Called when the block layer side of a hardware queue has
592 	 * been set up, allowing the driver to allocate/init matching
593 	 * structures.
594 	 */
595 	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
596 	/**
597 	 * @exit_hctx: Ditto for exit/teardown.
598 	 */
599 	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
600 
601 	/**
602 	 * @init_request: Called for every command allocated by the block layer
603 	 * to allow the driver to set up driver specific data.
604 	 *
605 	 * A tag greater than or equal to queue_depth is used for setting
606 	 * up the flush request.
607 	 */
608 	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
609 			    unsigned int, unsigned int);
610 	/**
611 	 * @exit_request: Ditto for exit/teardown.
612 	 */
613 	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
614 			     unsigned int);
615 
616 	/**
617 	 * @cleanup_rq: Called before freeing a request that has not completed
618 	 * yet; usually used to free the driver's private data.
619 	 */
620 	void (*cleanup_rq)(struct request *);
621 
622 	/**
623 	 * @busy: If set, returns whether or not this queue is currently busy.
624 	 */
625 	bool (*busy)(struct request_queue *);
626 
627 	/**
628 	 * @map_queues: This allows drivers to specify their own queue mapping by
629 	 * overriding the setup-time function that builds the mq_map.
630 	 */
631 	int (*map_queues)(struct blk_mq_tag_set *set);
632 
633 #ifdef CONFIG_BLK_DEBUG_FS
634 	/**
635 	 * @show_rq: Used by the debugfs implementation to show driver-specific
636 	 * information about a request.
637 	 */
638 	void (*show_rq)(struct seq_file *m, struct request *rq);
639 #endif
640 };
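
/*
 * Example (illustrative sketch, not part of this API): a minimal ->queue_rq
 * implementation that starts a request and completes it immediately, in the
 * spirit of a null block device.  example_queue_rq and example_mq_ops are
 * hypothetical names.
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops example_mq_ops = {
 *		.queue_rq	= example_queue_rq,
 *	};
 */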
641 
642 enum {
643 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
644 	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
645 	/*
646 	 * Set when this device requires an underlying blk-mq device for
647 	 * completing IO.
648 	 */
649 	BLK_MQ_F_STACKING	= 1 << 2,
650 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
651 	BLK_MQ_F_BLOCKING	= 1 << 5,
652 	/* Do not allow an I/O scheduler to be configured. */
653 	BLK_MQ_F_NO_SCHED	= 1 << 6,
654 	/*
655 	 * Select 'none' instead of 'mq-deadline' during queue registration
656 	 * in case of a single hwq or shared hwqs.
657 	 */
658 	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
659 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
660 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
661 
662 	BLK_MQ_S_STOPPED	= 0,
663 	BLK_MQ_S_TAG_ACTIVE	= 1,
664 	BLK_MQ_S_SCHED_RESTART	= 2,
665 
666 	/* hw queue is inactive after all its CPUs become offline */
667 	BLK_MQ_S_INACTIVE	= 3,
668 
669 	BLK_MQ_MAX_DEPTH	= 10240,
670 
671 	BLK_MQ_CPU_WORK_BATCH	= 8,
672 };
673 #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
674 	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
675 		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
676 #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
677 	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
678 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
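
/*
 * Example (illustrative sketch): encoding a tag allocation policy into the
 * tag set flags, roughly as SCSI hosts do for their tag allocation policy.
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 */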
679 
680 #define BLK_MQ_NO_HCTX_IDX	(-1U)
681 
682 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
683 		struct lock_class_key *lkclass);
684 #define blk_mq_alloc_disk(set, queuedata)				\
685 ({									\
686 	static struct lock_class_key __key;				\
687 									\
688 	__blk_mq_alloc_disk(set, queuedata, &__key);			\
689 })
690 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
691 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
692 		struct request_queue *q);
693 void blk_mq_unregister_dev(struct device *, struct request_queue *);
694 
695 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
696 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
697 		const struct blk_mq_ops *ops, unsigned int queue_depth,
698 		unsigned int set_flags);
699 void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
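
/*
 * Example (illustrative sketch, not part of this API): typical tag set and
 * disk setup in a driver probe path.  example_mq_ops, struct example_cmd,
 * and the locals set, ret, disk and driver_private are hypothetical driver
 * state.
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops		= &example_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct example_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		goto out;
 *
 *	disk = blk_mq_alloc_disk(set, driver_private);
 *	if (IS_ERR(disk)) {
 *		ret = PTR_ERR(disk);
 *		goto out_free_tag_set;
 *	}
 */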
700 
701 void blk_mq_free_request(struct request *rq);
702 
703 bool blk_mq_queue_inflight(struct request_queue *q);
704 
705 enum {
706 	/* return when out of requests */
707 	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
708 	/* allocate from reserved pool */
709 	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
710 	/* set RQF_PM */
711 	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
712 };
713 
714 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
715 		blk_mq_req_flags_t flags);
716 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
717 		unsigned int op, blk_mq_req_flags_t flags,
718 		unsigned int hctx_idx);
719 
720 /*
721  * Tag address space map.
722  */
723 struct blk_mq_tags {
724 	unsigned int nr_tags;
725 	unsigned int nr_reserved_tags;
726 
727 	atomic_t active_queues;
728 
729 	struct sbitmap_queue bitmap_tags;
730 	struct sbitmap_queue breserved_tags;
731 
732 	struct request **rqs;
733 	struct request **static_rqs;
734 	struct list_head page_list;
735 
736 	/*
737 	 * used to clear the request reference in rqs[] before freeing the
738 	 * request pool.
739 	 */
740 	spinlock_t lock;
741 };
742 
743 static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
744 					       unsigned int tag)
745 {
746 	if (tag < tags->nr_tags) {
747 		prefetch(tags->rqs[tag]);
748 		return tags->rqs[tag];
749 	}
750 
751 	return NULL;
752 }
753 
754 enum {
755 	BLK_MQ_UNIQUE_TAG_BITS = 16,
756 	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
757 };
758 
759 u32 blk_mq_unique_tag(struct request *rq);
760 
761 static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
762 {
763 	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
764 }
765 
766 static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
767 {
768 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
769 }
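
/*
 * Example (illustrative sketch): splitting a unique tag back into its
 * hardware queue index and per-queue tag, e.g. in a completion handler that
 * is shared across hardware queues.
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq    = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag    = blk_mq_unique_tag_to_tag(unique);
 */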
770 
771 /**
772  * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
773  * @rq: target request.
774  */
775 static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
776 {
777 	return READ_ONCE(rq->state);
778 }
779 
780 static inline int blk_mq_request_started(struct request *rq)
781 {
782 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
783 }
784 
785 static inline int blk_mq_request_completed(struct request *rq)
786 {
787 	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
788 }
789 
790 /*
791  *
792  * Set the state to complete when completing a request from inside ->queue_rq.
793  * This is used by drivers that want to ensure special complete actions that
794  * need access to the request are called on failure, e.g. by nvme for
795  * multipathing.
796  */
797 static inline void blk_mq_set_request_complete(struct request *rq)
798 {
799 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
800 }
801 
802 /*
803  * Complete the request directly instead of deferring it to softirq or
804  * completing it on another CPU. Useful in preemptible context instead of interrupt context.
805  */
806 static inline void blk_mq_complete_request_direct(struct request *rq,
807 		   void (*complete)(struct request *rq))
808 {
809 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
810 	complete(rq);
811 }
812 
813 void blk_mq_start_request(struct request *rq);
814 void blk_mq_end_request(struct request *rq, blk_status_t error);
815 void __blk_mq_end_request(struct request *rq, blk_status_t error);
816 void blk_mq_end_request_batch(struct io_comp_batch *ib);
817 
818 /*
819  * Only need start/end time stamping if we have iostat or
820  * blk stats enabled, or an IO scheduler is in use.
821  */
822 static inline bool blk_mq_need_time_stamp(struct request *rq)
823 {
824 	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
825 }
826 
827 /*
828  * Batched completions only work when there is no I/O error and no special
829  * ->end_io handler.
830  */
831 static inline bool blk_mq_add_to_batch(struct request *req,
832 				       struct io_comp_batch *iob, int ioerror,
833 				       void (*complete)(struct io_comp_batch *))
834 {
835 	if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)
836 		return false;
837 	if (!iob->complete)
838 		iob->complete = complete;
839 	else if (iob->complete != complete)
840 		return false;
841 	iob->need_ts |= blk_mq_need_time_stamp(req);
842 	rq_list_add(&iob->req_list, req);
843 	return true;
844 }
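
/*
 * Example (illustrative sketch): a driver completion/poll path that batches
 * completions when possible and falls back to per-request completion.
 * example_complete_batch() is a hypothetical helper that would end with a
 * call to blk_mq_end_request_batch().
 *
 *	if (!blk_mq_add_to_batch(rq, iob, error, example_complete_batch))
 *		blk_mq_complete_request(rq);
 */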
845 
846 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
847 void blk_mq_kick_requeue_list(struct request_queue *q);
848 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
849 void blk_mq_complete_request(struct request *rq);
850 bool blk_mq_complete_request_remote(struct request *rq);
851 bool blk_mq_queue_stopped(struct request_queue *q);
852 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
853 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
854 void blk_mq_stop_hw_queues(struct request_queue *q);
855 void blk_mq_start_hw_queues(struct request_queue *q);
856 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
857 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
858 void blk_mq_quiesce_queue(struct request_queue *q);
859 void blk_mq_wait_quiesce_done(struct request_queue *q);
860 void blk_mq_unquiesce_queue(struct request_queue *q);
861 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
862 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
863 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
864 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
865 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
866 		busy_tag_iter_fn *fn, void *priv);
867 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
868 void blk_mq_freeze_queue(struct request_queue *q);
869 void blk_mq_unfreeze_queue(struct request_queue *q);
870 void blk_freeze_queue_start(struct request_queue *q);
871 void blk_mq_freeze_queue_wait(struct request_queue *q);
872 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
873 				     unsigned long timeout);
874 
875 int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
876 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
877 
878 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
879 
880 unsigned int blk_mq_rq_cpu(struct request *rq);
881 
882 bool __blk_should_fake_timeout(struct request_queue *q);
883 static inline bool blk_should_fake_timeout(struct request_queue *q)
884 {
885 	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
886 	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
887 		return __blk_should_fake_timeout(q);
888 	return false;
889 }
890 
891 /**
892  * blk_mq_rq_from_pdu - cast a PDU to a request
893  * @pdu: the PDU (Protocol Data Unit) to be cast
894  *
895  * Return: request
896  *
897  * Driver command data is immediately after the request. So subtract request
898  * size to get back to the original request.
899  */
900 static inline struct request *blk_mq_rq_from_pdu(void *pdu)
901 {
902 	return pdu - sizeof(struct request);
903 }
904 
905 /**
906  * blk_mq_rq_to_pdu - cast a request to a PDU
907  * @rq: the request to be cast
908  *
909  * Return: pointer to the PDU
910  *
911  * Driver command data is immediately after the request. So add the request size to get
912  * the PDU.
913  */
914 static inline void *blk_mq_rq_to_pdu(struct request *rq)
915 {
916 	return rq + 1;
917 }
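
/*
 * Example (illustrative sketch): per-request driver data ("PDU") is reserved
 * via blk_mq_tag_set.cmd_size and reached with the helpers above.
 * struct example_cmd is a hypothetical driver type.
 *
 *	set->cmd_size = sizeof(struct example_cmd);
 *	...
 *	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);	// same_rq == rq
 */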
918 
919 #define queue_for_each_hw_ctx(q, hctx, i)				\
920 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
921 	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
922 
923 #define hctx_for_each_ctx(hctx, ctx, i)					\
924 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
925 	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
926 
927 static inline void blk_mq_cleanup_rq(struct request *rq)
928 {
929 	if (rq->q->mq_ops->cleanup_rq)
930 		rq->q->mq_ops->cleanup_rq(rq);
931 }
932 
933 static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
934 		unsigned int nr_segs)
935 {
936 	rq->nr_phys_segments = nr_segs;
937 	rq->__data_len = bio->bi_iter.bi_size;
938 	rq->bio = rq->biotail = bio;
939 	rq->ioprio = bio_prio(bio);
940 }
941 
942 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
943 		struct lock_class_key *key);
944 
945 static inline bool rq_is_sync(struct request *rq)
946 {
947 	return op_is_sync(rq->cmd_flags);
948 }
949 
950 void blk_rq_init(struct request_queue *q, struct request *rq);
951 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
952 		struct bio_set *bs, gfp_t gfp_mask,
953 		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
954 void blk_rq_unprep_clone(struct request *rq);
955 blk_status_t blk_insert_cloned_request(struct request_queue *q,
956 		struct request *rq);
957 
958 struct rq_map_data {
959 	struct page **pages;
960 	int page_order;
961 	int nr_entries;
962 	unsigned long offset;
963 	int null_mapped;
964 	int from_user;
965 };
966 
967 int blk_rq_map_user(struct request_queue *, struct request *,
968 		struct rq_map_data *, void __user *, unsigned long, gfp_t);
969 int blk_rq_map_user_iov(struct request_queue *, struct request *,
970 		struct rq_map_data *, const struct iov_iter *, gfp_t);
971 int blk_rq_unmap_user(struct bio *);
972 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
973 		unsigned int, gfp_t);
974 int blk_rq_append_bio(struct request *rq, struct bio *bio);
975 void blk_execute_rq_nowait(struct request *rq, bool at_head,
976 		rq_end_io_fn *end_io);
977 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
978 
979 struct req_iterator {
980 	struct bvec_iter iter;
981 	struct bio *bio;
982 };
983 
984 #define __rq_for_each_bio(_bio, rq)	\
985 	if ((rq->bio))			\
986 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
987 
988 #define rq_for_each_segment(bvl, _rq, _iter)			\
989 	__rq_for_each_bio(_iter.bio, _rq)			\
990 		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
991 
992 #define rq_for_each_bvec(bvl, _rq, _iter)			\
993 	__rq_for_each_bio(_iter.bio, _rq)			\
994 		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
995 
996 #define rq_iter_last(bvec, _iter)				\
997 		(_iter.bio->bi_next == NULL &&			\
998 		 bio_iter_last(bvec, _iter.iter))
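
/*
 * Example (illustrative sketch): walking the data of a request segment by
 * segment with the iterators above.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	unsigned int total = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		total += bvec.bv_len;
 */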
999 
1000 /*
1001  * blk_rq_pos()			: the current sector
1002  * blk_rq_bytes()		: bytes left in the entire request
1003  * blk_rq_cur_bytes()		: bytes left in the current segment
1004  * blk_rq_sectors()		: sectors left in the entire request
1005  * blk_rq_cur_sectors()		: sectors left in the current segment
1006  * blk_rq_stats_sectors()	: sectors of the entire request used for stats
1007  */
1008 static inline sector_t blk_rq_pos(const struct request *rq)
1009 {
1010 	return rq->__sector;
1011 }
1012 
1013 static inline unsigned int blk_rq_bytes(const struct request *rq)
1014 {
1015 	return rq->__data_len;
1016 }
1017 
1018 static inline int blk_rq_cur_bytes(const struct request *rq)
1019 {
1020 	if (!rq->bio)
1021 		return 0;
1022 	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
1023 		return rq->bio->bi_iter.bi_size;
1024 	return bio_iovec(rq->bio).bv_len;
1025 }
1026 
1027 static inline unsigned int blk_rq_sectors(const struct request *rq)
1028 {
1029 	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1030 }
1031 
1032 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1033 {
1034 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1035 }
1036 
1037 static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
1038 {
1039 	return rq->stats_sectors;
1040 }
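
/*
 * Example (illustrative sketch): how the accessors above relate for a
 * sector-aligned request with data.
 *
 *	sector_t start   = blk_rq_pos(rq);	// current sector
 *	unsigned int len = blk_rq_bytes(rq);	// == blk_rq_sectors(rq) << SECTOR_SHIFT
 */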
1041 
1042 /*
1043  * Some commands like WRITE SAME have a payload or data transfer size which
1044  * is different from the size of the request.  Any driver that supports such
1045  * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1046  * calculate the data transfer size.
1047  */
1048 static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1049 {
1050 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1051 		return rq->special_vec.bv_len;
1052 	return blk_rq_bytes(rq);
1053 }
1054 
1055 /*
1056  * Return the first full biovec in the request.  The caller needs to check
1057  * that the request has at least one bvec before calling this helper.
1058  */
1059 static inline struct bio_vec req_bvec(struct request *rq)
1060 {
1061 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1062 		return rq->special_vec;
1063 	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1064 }
1065 
1066 static inline unsigned int blk_rq_count_bios(struct request *rq)
1067 {
1068 	unsigned int nr_bios = 0;
1069 	struct bio *bio;
1070 
1071 	__rq_for_each_bio(bio, rq)
1072 		nr_bios++;
1073 
1074 	return nr_bios;
1075 }
1076 
1077 void blk_steal_bios(struct bio_list *list, struct request *rq);
1078 
1079 /*
1080  * Request completion related functions.
1081  *
1082  * blk_update_request() completes the given number of bytes and updates
1083  * the request without completing it.
1084  */
1085 bool blk_update_request(struct request *rq, blk_status_t error,
1086 			       unsigned int nr_bytes);
1087 void blk_abort_request(struct request *);
1088 
1089 /*
1090  * Number of physical segments as sent to the device.
1091  *
1092  * Normally this is the number of discontiguous data segments sent by the
1093  * submitter.  But for data-less commands like discard we might have no
1094  * actual data segments submitted, but the driver might have to add its
1095  * own special payload.  In that case we still return 1 here so that this
1096  * special payload will be mapped.
1097  */
1098 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1099 {
1100 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1101 		return 1;
1102 	return rq->nr_phys_segments;
1103 }
1104 
1105 /*
1106  * Number of discard segments (or ranges) the driver needs to fill in.
1107  * Each discard bio merged into a request is counted as one segment.
1108  */
1109 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1110 {
1111 	return max_t(unsigned short, rq->nr_phys_segments, 1);
1112 }
1113 
1114 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1115 		struct scatterlist *sglist, struct scatterlist **last_sg);
1116 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1117 		struct scatterlist *sglist)
1118 {
1119 	struct scatterlist *last_sg = NULL;
1120 
1121 	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1122 }
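
/*
 * Example (illustrative sketch): mapping a request for DMA.  sgl and dev are
 * hypothetical driver state, sized and allocated elsewhere.
 *
 *	int nents;
 *
 *	sg_init_table(sgl, queue_max_segments(rq->q));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *	if (nents)
 *		nents = dma_map_sg(dev, sgl, nents, rq_dma_dir(rq));
 */
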
1123 void blk_dump_rq_flags(struct request *, char *);
1124 
1125 #ifdef CONFIG_BLK_DEV_ZONED
1126 static inline unsigned int blk_rq_zone_no(struct request *rq)
1127 {
1128 	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
1129 }
1130 
1131 static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1132 {
1133 	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
1134 }
1135 
1136 bool blk_req_needs_zone_write_lock(struct request *rq);
1137 bool blk_req_zone_write_trylock(struct request *rq);
1138 void __blk_req_zone_write_lock(struct request *rq);
1139 void __blk_req_zone_write_unlock(struct request *rq);
1140 
1141 static inline void blk_req_zone_write_lock(struct request *rq)
1142 {
1143 	if (blk_req_needs_zone_write_lock(rq))
1144 		__blk_req_zone_write_lock(rq);
1145 }
1146 
1147 static inline void blk_req_zone_write_unlock(struct request *rq)
1148 {
1149 	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1150 		__blk_req_zone_write_unlock(rq);
1151 }
1152 
1153 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1154 {
1155 	return rq->q->seq_zones_wlock &&
1156 		test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
1157 }
1158 
1159 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1160 {
1161 	if (!blk_req_needs_zone_write_lock(rq))
1162 		return true;
1163 	return !blk_req_zone_is_write_locked(rq);
1164 }
1165 #else /* CONFIG_BLK_DEV_ZONED */
1166 static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1167 {
1168 	return false;
1169 }
1170 
1171 static inline void blk_req_zone_write_lock(struct request *rq)
1172 {
1173 }
1174 
1175 static inline void blk_req_zone_write_unlock(struct request *rq)
1176 {
1177 }
1178 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1179 {
1180 	return false;
1181 }
1182 
1183 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1184 {
1185 	return true;
1186 }
1187 #endif /* CONFIG_BLK_DEV_ZONED */
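
/*
 * Example (illustrative sketch): how dispatch code may serialize writes to
 * sequential zones using the helpers above.  Deferring the request on
 * contention is one possible policy, not a requirement.
 *
 *	if (!blk_req_can_dispatch_to_zone(rq))
 *		return BLK_STS_RESOURCE;	// zone is write-locked, try later
 *	blk_req_zone_write_lock(rq);
 *	// ... issue the request; the lock is dropped via
 *	// blk_req_zone_write_unlock() on completion or error.
 */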
1188 
1189 #endif /* BLK_MQ_H */
1190