xref: /linux-6.15/include/linux/blk-mq.h (revision bb66fc67)
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

/*
 * CPU hotplug notification hook, embedded in each hardware queue.
 */
struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	delayed_work;	/* deferred queue runs */

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;	/* index of this hw queue */

	void			*driver_data;	/* driver private */

	unsigned int		nr_ctx;		/* sw queues mapped here */
	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;	/* sw queues with pending work */

	struct request		**rqs;		/* tag -> request map */
	struct list_head	page_list;	/* pages backing rqs/pdu area */
	struct blk_mq_tags	*tags;

	unsigned long		queued;		/* requests queued here */
	unsigned long		run;		/* times the queue was run */
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		queue_depth;
	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};

/*
 * Registration data handed to blk_mq_init_queue() to describe the queues
 * and per-request state the driver wants.
 */
struct blk_mq_reg {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
};

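/*
 * Illustrative sketch, not part of this header: how a driver might fill in a
 * blk_mq_reg before passing it to blk_mq_init_queue() (declared below). The
 * "foo" names, the depth and the flag choice are hypothetical; a matching
 * foo_mq_ops table is sketched after struct blk_mq_ops further down.
 *
 *	static struct blk_mq_reg foo_mq_reg = {
 *		.ops		= &foo_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct foo_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 */
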
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *, unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

	/*
	 * Complete a request; invoked from the path triggered by
	 * blk_mq_complete_request().
	 */
	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;
};

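/*
 * Illustrative sketch, not part of this header: a minimal operations table
 * for a hypothetical "foo" driver. queue_rq() turns the request into a
 * device command and reports the outcome with the BLK_MQ_RQ_QUEUE_* codes
 * defined below; map_queue and the hctx allocation hooks reuse the generic
 * helpers declared later in this file. struct foo_dev, struct foo_cmd,
 * foo_dev_has_room(), foo_submit(), foo_init_hctx() and foo_complete_rq()
 * are all assumed driver code.
 *
 *	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		struct foo_dev *dev = hctx->driver_data;
 *		struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		if (!foo_dev_has_room(dev)) {
 *			blk_mq_stop_hw_queue(hctx);
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		}
 *
 *		foo_submit(dev, cmd, rq);
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *		.alloc_hctx	= blk_mq_alloc_single_hw_queue,
 *		.free_hctx	= blk_mq_free_single_hw_queue,
 *		.init_hctx	= foo_init_hctx,
 *		.complete	= foo_complete_rq,
 *	};
 */
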
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
	BLK_MQ_F_SHOULD_IPI	= 1 << 2,

	BLK_MQ_S_STOPPED	= 0,

	BLK_MQ_MAX_DEPTH	= 2048,
};

struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
int blk_mq_init_commands(struct request_queue *,
		int (*init)(void *data, struct blk_mq_hw_ctx *,
			    struct request *, unsigned int),
		void *data);
void blk_mq_free_commands(struct request_queue *,
		void (*free)(void *data, struct blk_mq_hw_ctx *,
			     struct request *, unsigned int),
		void *data);

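/*
 * Illustrative sketch, not part of this header: typical bring-up for a
 * hypothetical driver. blk_mq_init_queue() takes the registration data and a
 * driver cookie (passed back through init_hctx) and returns an ERR_PTR on
 * failure; blk_mq_init_commands() then gives the driver one pass over every
 * pre-allocated request so it can initialise the per-request pdu area.
 * foo_dev, foo_cmd and foo_mq_reg are assumed driver code, and error
 * handling is trimmed down.
 *
 *	static int foo_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
 *				struct request *rq, unsigned int idx)
 *	{
 *		struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		cmd->dev = data;
 *		return 0;
 *	}
 *
 *	static int foo_setup_queue(struct foo_dev *dev)
 *	{
 *		dev->q = blk_mq_init_queue(&foo_mq_reg, dev);
 *		if (IS_ERR(dev->q))
 *			return PTR_ERR(dev->q);
 *
 *		blk_mq_init_commands(dev->q, foo_init_cmd, dev);
 *		return 0;
 *	}
 */
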
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

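/*
 * Illustrative sketch, not part of this header: a driver-internal command can
 * be built on a regular tagged request obtained with blk_mq_alloc_request()
 * and released again with blk_mq_free_request(). foo_dev, foo_cmd and
 * foo_submit_sync() are assumed driver code; blk_mq_alloc_request() returns
 * NULL on failure.
 *
 *	static int foo_send_internal(struct foo_dev *dev, int opcode)
 *	{
 *		struct request *rq;
 *		struct foo_cmd *cmd;
 *
 *		rq = blk_mq_alloc_request(dev->q, READ, GFP_KERNEL);
 *		if (!rq)
 *			return -EBUSY;
 *
 *		cmd = blk_mq_rq_to_pdu(rq);
 *		cmd->opcode = opcode;
 *		foo_submit_sync(dev, cmd);
 *
 *		blk_mq_free_request(rq);
 *		return 0;
 *	}
 */
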
/*
 * Complete @nr_bytes of @rq. Returns true if the request still has bytes
 * remaining, false if it is fully done.
 */
bool blk_mq_end_io_partial(struct request *rq, int error,
		unsigned int nr_bytes);

static inline void blk_mq_end_io(struct request *rq, int error)
{
	bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
	BUG_ON(!done);
}

void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q);

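/*
 * Illustrative sketch, not part of this header: the usual completion flow and
 * how a queue stopped in queue_rq() (see the sketch above) gets going again.
 * The interrupt handler hands each finished request to
 * blk_mq_complete_request(); the driver's ->complete hook then ends it with
 * blk_mq_end_io(). foo_dev, foo_next_completed() and the cmd->error field
 * are assumed driver code.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *dev = data;
 *		struct request *rq;
 *
 *		while ((rq = foo_next_completed(dev)) != NULL)
 *			blk_mq_complete_request(rq);
 *
 *		blk_mq_start_stopped_hw_queues(dev->q);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void foo_complete_rq(struct request *rq)
 *	{
 *		struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_end_io(rq, cmd->error);
 *	}
 */
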
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}

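/*
 * Illustrative sketch, not part of this header: because the pdu lives right
 * behind the request, a completion callback that only sees the driver's
 * command structure can get back to the request with simple pointer
 * arithmetic and no lookup. foo_cmd and foo_cmd_done() are assumed driver
 * code; cmd_size in blk_mq_reg would be sizeof(struct foo_cmd).
 *
 *	static void foo_cmd_done(struct foo_cmd *cmd, int error)
 *	{
 *		struct request *rq = blk_mq_rq_from_pdu(cmd);
 *
 *		cmd->error = error;
 *		blk_mq_complete_request(rq);
 *	}
 */
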
static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
					       unsigned int tag)
{
	return hctx->rqs[tag];
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})

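/*
 * Illustrative sketch, not part of this header: the iterators above walk the
 * hardware queues of a request_queue and the software (per-cpu) contexts
 * mapped onto them. A hypothetical debug helper summing the per-hctx queued
 * counter could look like this; in blk_ctx_sum() the expression passed as
 * "sum" is evaluated once per software queue with the current blk_mq_ctx
 * available as __x.
 *
 *	static unsigned long foo_total_queued(struct request_queue *q)
 *	{
 *		struct blk_mq_hw_ctx *hctx;
 *		unsigned long total = 0;
 *		unsigned int i;
 *
 *		queue_for_each_hw_ctx(q, hctx, i)
 *			total += hctx->queued;
 *
 *		return total;
 *	}
 */
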
#endif