xref: /linux-6.15/include/linux/blkdev.h (revision 4c5a116a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_BLKDEV_H
3 #define _LINUX_BLKDEV_H
4 
5 #include <linux/sched.h>
6 #include <linux/sched/clock.h>
7 #include <linux/major.h>
8 #include <linux/genhd.h>
9 #include <linux/list.h>
10 #include <linux/llist.h>
11 #include <linux/timer.h>
12 #include <linux/workqueue.h>
13 #include <linux/pagemap.h>
14 #include <linux/backing-dev-defs.h>
15 #include <linux/wait.h>
16 #include <linux/mempool.h>
17 #include <linux/pfn.h>
18 #include <linux/bio.h>
19 #include <linux/stringify.h>
20 #include <linux/gfp.h>
21 #include <linux/bsg.h>
22 #include <linux/smp.h>
23 #include <linux/rcupdate.h>
24 #include <linux/percpu-refcount.h>
25 #include <linux/scatterlist.h>
26 #include <linux/blkzoned.h>
27 
28 struct module;
29 struct scsi_ioctl_command;
30 
31 struct request_queue;
32 struct elevator_queue;
33 struct blk_trace;
34 struct request;
35 struct sg_io_hdr;
36 struct bsg_job;
37 struct blkcg_gq;
38 struct blk_flush_queue;
39 struct pr_ops;
40 struct rq_qos;
41 struct blk_queue_stats;
42 struct blk_stat_callback;
43 struct blk_keyslot_manager;
44 
45 #define BLKDEV_MIN_RQ	4
46 #define BLKDEV_MAX_RQ	128	/* Default maximum */
47 
48 /* Must be consistent with blk_mq_poll_stats_bkt() */
49 #define BLK_MQ_POLL_STATS_BKTS 16
50 
51 /* Doing classic polling */
52 #define BLK_MQ_POLL_CLASSIC -1
53 
54 /*
55  * Maximum number of blkcg policies allowed to be registered concurrently.
56  * Defined here to simplify include dependency.
57  */
58 #define BLKCG_MAX_POLS		5
59 
60 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
61 
62 /* request flags */
63 
64 typedef __u32 __bitwise req_flags_t;
65 
66 /* elevator knows about this request */
67 #define RQF_SORTED		((__force req_flags_t)(1 << 0))
68 /* drive already may have started this one */
69 #define RQF_STARTED		((__force req_flags_t)(1 << 1))
70 /* may not be passed by ioscheduler */
71 #define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
72 /* request for flush sequence */
73 #define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
74 /* merge of different types, fail separately */
75 #define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
76 /* track inflight for MQ */
77 #define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
78 /* don't call prep for this one */
79 #define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
80 /* set for "ide_preempt" requests and also for requests for which the SCSI
81    "quiesce" state must be ignored. */
82 #define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
83 /* vaguely specified driver internal error.  Ignored by the block layer */
84 #define RQF_FAILED		((__force req_flags_t)(1 << 10))
85 /* don't warn about errors */
86 #define RQF_QUIET		((__force req_flags_t)(1 << 11))
87 /* elevator private data attached */
88 #define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
89 /* account into disk and partition IO statistics */
90 #define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
91 /* request came from our alloc pool */
92 #define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
93 /* runtime pm request */
94 #define RQF_PM			((__force req_flags_t)(1 << 15))
95 /* on IO scheduler merge hash */
96 #define RQF_HASHED		((__force req_flags_t)(1 << 16))
97 /* track IO completion time */
98 #define RQF_STATS		((__force req_flags_t)(1 << 17))
99 /* Look at ->special_vec for the actual data payload instead of the
100    bio chain. */
101 #define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
102 /* The per-zone write lock is held for this request */
103 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
104 /* already slept for hybrid poll */
105 #define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
106 /* ->timeout has been called, don't expire again */
107 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
108 
109 /* flags that prevent us from merging requests: */
110 #define RQF_NOMERGE_FLAGS \
111 	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
112 
113 /*
114  * Request state for blk-mq.
115  */
116 enum mq_rq_state {
117 	MQ_RQ_IDLE		= 0,
118 	MQ_RQ_IN_FLIGHT		= 1,
119 	MQ_RQ_COMPLETE		= 2,
120 };
121 
122 /*
123  * Try to put the fields that are referenced together in the same cacheline.
124  *
125  * If you modify this structure, make sure to update blk_rq_init() and
126  * especially blk_mq_rq_ctx_init() to take care of the added fields.
127  */
128 struct request {
129 	struct request_queue *q;
130 	struct blk_mq_ctx *mq_ctx;
131 	struct blk_mq_hw_ctx *mq_hctx;
132 
133 	unsigned int cmd_flags;		/* op and common flags */
134 	req_flags_t rq_flags;
135 
136 	int tag;
137 	int internal_tag;
138 
139 	/* the following two fields are internal, NEVER access directly */
140 	unsigned int __data_len;	/* total data len */
141 	sector_t __sector;		/* sector cursor */
142 
143 	struct bio *bio;
144 	struct bio *biotail;
145 
146 	struct list_head queuelist;
147 
148 	/*
149 	 * The hash is used inside the scheduler, and killed once the
150 	 * request reaches the dispatch list. The ipi_list is only used
151 	 * to queue the request for softirq completion, which is long
152 	 * after the request has been unhashed (and even removed from
153 	 * the dispatch list).
154 	 */
155 	union {
156 		struct hlist_node hash;	/* merge hash */
157 		struct list_head ipi_list;
158 	};
159 
160 	/*
161 	 * The rb_node is only used inside the io scheduler, requests
162 	 * are pruned when moved to the dispatch queue. So let the
163 	 * completion_data share space with the rb_node.
164 	 */
165 	union {
166 		struct rb_node rb_node;	/* sort/lookup */
167 		struct bio_vec special_vec;
168 		void *completion_data;
169 		int error_count; /* for legacy drivers, don't use */
170 	};
171 
172 	/*
173 	 * Three pointers are available for the IO schedulers, if they need
174 	 * more they have to dynamically allocate it.  Flush requests are
175 	 * never put on the IO scheduler. So let the flush fields share
176 	 * space with the elevator data.
177 	 */
178 	union {
179 		struct {
180 			struct io_cq		*icq;
181 			void			*priv[2];
182 		} elv;
183 
184 		struct {
185 			unsigned int		seq;
186 			struct list_head	list;
187 			rq_end_io_fn		*saved_end_io;
188 		} flush;
189 	};
190 
191 	struct gendisk *rq_disk;
192 	struct hd_struct *part;
193 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
194 	/* Time that the first bio started allocating this request. */
195 	u64 alloc_time_ns;
196 #endif
197 	/* Time that this request was allocated for this IO. */
198 	u64 start_time_ns;
199 	/* Time that I/O was submitted to the device. */
200 	u64 io_start_time_ns;
201 
202 #ifdef CONFIG_BLK_WBT
203 	unsigned short wbt_flags;
204 #endif
205 	/*
206 	 * rq sectors used for blk stats. It has the same value as
207 	 * blk_rq_sectors(rq), except that it is never zeroed by
208 	 * completion.
209 	 */
210 	unsigned short stats_sectors;
211 
212 	/*
213 	 * Number of scatter-gather DMA addr+len pairs after
214 	 * physical address coalescing is performed.
215 	 */
216 	unsigned short nr_phys_segments;
217 
218 #if defined(CONFIG_BLK_DEV_INTEGRITY)
219 	unsigned short nr_integrity_segments;
220 #endif
221 
222 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
223 	struct bio_crypt_ctx *crypt_ctx;
224 	struct blk_ksm_keyslot *crypt_keyslot;
225 #endif
226 
227 	unsigned short write_hint;
228 	unsigned short ioprio;
229 
230 	enum mq_rq_state state;
231 	refcount_t ref;
232 
233 	unsigned int timeout;
234 	unsigned long deadline;
235 
236 	union {
237 		struct __call_single_data csd;
238 		u64 fifo_time;
239 	};
240 
241 	/*
242 	 * completion callback.
243 	 */
244 	rq_end_io_fn *end_io;
245 	void *end_io_data;
246 };
247 
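/*
 * Editor's sketch (not part of the original header): a driver normally reads
 * a request through the accessors defined later in this file (blk_rq_pos(),
 * blk_rq_bytes(), blk_rq_sectors()) rather than touching __sector/__data_len
 * directly, e.g.:
 *
 *	static blk_status_t mydrv_handle_rq(struct request *rq)
 *	{
 *		sector_t lba = blk_rq_pos(rq);		// first sector
 *		unsigned int len = blk_rq_bytes(rq);	// total bytes
 *
 *		if (op_is_write(req_op(rq)))
 *			return mydrv_write(lba, len, rq);
 *		return mydrv_read(lba, len, rq);
 *	}
 *
 * mydrv_handle_rq(), mydrv_read() and mydrv_write() are hypothetical names;
 * completion itself would typically go through blk_mq_end_request() from
 * <linux/blk-mq.h>, which is not declared in this header.
 */
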
248 static inline bool blk_op_is_scsi(unsigned int op)
249 {
250 	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
251 }
252 
253 static inline bool blk_op_is_private(unsigned int op)
254 {
255 	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
256 }
257 
258 static inline bool blk_rq_is_scsi(struct request *rq)
259 {
260 	return blk_op_is_scsi(req_op(rq));
261 }
262 
263 static inline bool blk_rq_is_private(struct request *rq)
264 {
265 	return blk_op_is_private(req_op(rq));
266 }
267 
268 static inline bool blk_rq_is_passthrough(struct request *rq)
269 {
270 	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
271 }
272 
273 static inline bool bio_is_passthrough(struct bio *bio)
274 {
275 	unsigned op = bio_op(bio);
276 
277 	return blk_op_is_scsi(op) || blk_op_is_private(op);
278 }
279 
280 static inline unsigned short req_get_ioprio(struct request *req)
281 {
282 	return req->ioprio;
283 }
284 
285 #include <linux/elevator.h>
286 
287 struct blk_queue_ctx;
288 
289 struct bio_vec;
290 
291 enum blk_eh_timer_return {
292 	BLK_EH_DONE,		/* driver has completed the command */
293 	BLK_EH_RESET_TIMER,	/* reset timer and try again */
294 };
295 
296 enum blk_queue_state {
297 	Queue_down,
298 	Queue_up,
299 };
300 
301 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
302 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
303 
304 #define BLK_SCSI_MAX_CMDS	(256)
305 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
306 
307 /*
308  * Zoned block device models (zoned limit).
309  */
310 enum blk_zoned_model {
311 	BLK_ZONED_NONE,	/* Regular block device */
312 	BLK_ZONED_HA,	/* Host-aware zoned block device */
313 	BLK_ZONED_HM,	/* Host-managed zoned block device */
314 };
315 
316 struct queue_limits {
317 	unsigned long		bounce_pfn;
318 	unsigned long		seg_boundary_mask;
319 	unsigned long		virt_boundary_mask;
320 
321 	unsigned int		max_hw_sectors;
322 	unsigned int		max_dev_sectors;
323 	unsigned int		chunk_sectors;
324 	unsigned int		max_sectors;
325 	unsigned int		max_segment_size;
326 	unsigned int		physical_block_size;
327 	unsigned int		logical_block_size;
328 	unsigned int		alignment_offset;
329 	unsigned int		io_min;
330 	unsigned int		io_opt;
331 	unsigned int		max_discard_sectors;
332 	unsigned int		max_hw_discard_sectors;
333 	unsigned int		max_write_same_sectors;
334 	unsigned int		max_write_zeroes_sectors;
335 	unsigned int		max_zone_append_sectors;
336 	unsigned int		discard_granularity;
337 	unsigned int		discard_alignment;
338 
339 	unsigned short		max_segments;
340 	unsigned short		max_integrity_segments;
341 	unsigned short		max_discard_segments;
342 
343 	unsigned char		misaligned;
344 	unsigned char		discard_misaligned;
345 	unsigned char		raid_partial_stripes_expensive;
346 	enum blk_zoned_model	zoned;
347 };
348 
349 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
350 			       void *data);
351 
352 #ifdef CONFIG_BLK_DEV_ZONED
353 
354 #define BLK_ALL_ZONES  ((unsigned int)-1)
355 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
356 			unsigned int nr_zones, report_zones_cb cb, void *data);
357 unsigned int blkdev_nr_zones(struct gendisk *disk);
358 extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
359 			    sector_t sectors, sector_t nr_sectors,
360 			    gfp_t gfp_mask);
361 int blk_revalidate_disk_zones(struct gendisk *disk,
362 			      void (*update_driver_data)(struct gendisk *disk));
363 
364 extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
365 				     unsigned int cmd, unsigned long arg);
366 extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
367 				  unsigned int cmd, unsigned long arg);
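
/*
 * Editor's sketch (not in the original header): blkdev_report_zones() invokes
 * the report_zones_cb callback once per reported zone.  A minimal callback
 * that counts zones whose write pointer has advanced past the zone start
 * might look like the following; "wp_count" and the surrounding calling
 * convention are hypothetical.
 *
 *	static int count_open_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				       void *data)
 *	{
 *		unsigned int *wp_count = data;
 *
 *		if (zone->wp > zone->start)
 *			(*wp_count)++;
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	// ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *	//			      count_open_zones_cb, &wp_count);
 */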
368 
369 #else /* CONFIG_BLK_DEV_ZONED */
370 
371 static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
372 {
373 	return 0;
374 }
375 
376 static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
377 					    fmode_t mode, unsigned int cmd,
378 					    unsigned long arg)
379 {
380 	return -ENOTTY;
381 }
382 
383 static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
384 					 fmode_t mode, unsigned int cmd,
385 					 unsigned long arg)
386 {
387 	return -ENOTTY;
388 }
389 
390 #endif /* CONFIG_BLK_DEV_ZONED */
391 
392 struct request_queue {
393 	struct request		*last_merge;
394 	struct elevator_queue	*elevator;
395 
396 	struct blk_queue_stats	*stats;
397 	struct rq_qos		*rq_qos;
398 
399 	const struct blk_mq_ops	*mq_ops;
400 
401 	/* sw queues */
402 	struct blk_mq_ctx __percpu	*queue_ctx;
403 
404 	unsigned int		queue_depth;
405 
406 	/* hw dispatch queues */
407 	struct blk_mq_hw_ctx	**queue_hw_ctx;
408 	unsigned int		nr_hw_queues;
409 
410 	struct backing_dev_info	*backing_dev_info;
411 
412 	/*
413 	 * The queue owner gets to use this for whatever they like.
414 	 * ll_rw_blk doesn't touch it.
415 	 */
416 	void			*queuedata;
417 
418 	/*
419 	 * various queue flags, see QUEUE_* below
420 	 */
421 	unsigned long		queue_flags;
422 	/*
423 	 * Number of contexts that have called blk_set_pm_only(). If this
424 	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
425 	 * processed.
426 	 */
427 	atomic_t		pm_only;
428 
429 	/*
430 	 * ida allocated id for this queue.  Used to index queues from
431 	 * ioctx.
432 	 */
433 	int			id;
434 
435 	/*
436 	 * queue needs bounce pages for pages above this limit
437 	 */
438 	gfp_t			bounce_gfp;
439 
440 	spinlock_t		queue_lock;
441 
442 	/*
443 	 * queue kobject
444 	 */
445 	struct kobject kobj;
446 
447 	/*
448 	 * mq queue kobject
449 	 */
450 	struct kobject *mq_kobj;
451 
452 #ifdef  CONFIG_BLK_DEV_INTEGRITY
453 	struct blk_integrity integrity;
454 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
455 
456 #ifdef CONFIG_PM
457 	struct device		*dev;
458 	int			rpm_status;
459 	unsigned int		nr_pending;
460 #endif
461 
462 	/*
463 	 * queue settings
464 	 */
465 	unsigned long		nr_requests;	/* Max # of requests */
466 
467 	unsigned int		dma_pad_mask;
468 	unsigned int		dma_alignment;
469 
470 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
471 	/* Inline crypto capabilities */
472 	struct blk_keyslot_manager *ksm;
473 #endif
474 
475 	unsigned int		rq_timeout;
476 	int			poll_nsec;
477 
478 	struct blk_stat_callback	*poll_cb;
479 	struct blk_rq_stat	poll_stat[BLK_MQ_POLL_STATS_BKTS];
480 
481 	struct timer_list	timeout;
482 	struct work_struct	timeout_work;
483 
484 	struct list_head	icq_list;
485 #ifdef CONFIG_BLK_CGROUP
486 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
487 	struct blkcg_gq		*root_blkg;
488 	struct list_head	blkg_list;
489 #endif
490 
491 	struct queue_limits	limits;
492 
493 	unsigned int		required_elevator_features;
494 
495 #ifdef CONFIG_BLK_DEV_ZONED
496 	/*
497 	 * Zoned block device information for request dispatch control.
498 	 * nr_zones is the total number of zones of the device. This is always
499 	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
500 	 * bits which indicates if a zone is conventional (bit set) or
501 	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
502 	 * bits which indicates if a zone is write locked, that is, if a write
503 	 * request targeting the zone was dispatched. All three fields are
504 	 * initialized by the low level device driver (e.g. scsi/sd.c).
505 	 * Stacking drivers (device mappers) may or may not initialize
506 	 * these fields.
507 	 *
508 	 * Reads of this information must be protected with blk_queue_enter() /
509 	 * blk_queue_exit(). Modifying this information is only allowed while
510 	 * no requests are being processed. See also blk_mq_freeze_queue() and
511 	 * blk_mq_unfreeze_queue().
512 	 */
513 	unsigned int		nr_zones;
514 	unsigned long		*conv_zones_bitmap;
515 	unsigned long		*seq_zones_wlock;
516 #endif /* CONFIG_BLK_DEV_ZONED */
517 
518 	/*
519 	 * sg stuff
520 	 */
521 	unsigned int		sg_timeout;
522 	unsigned int		sg_reserved_size;
523 	int			node;
524 	struct mutex		debugfs_mutex;
525 #ifdef CONFIG_BLK_DEV_IO_TRACE
526 	struct blk_trace __rcu	*blk_trace;
527 #endif
528 	/*
529 	 * for flush operations
530 	 */
531 	struct blk_flush_queue	*fq;
532 
533 	struct list_head	requeue_list;
534 	spinlock_t		requeue_lock;
535 	struct delayed_work	requeue_work;
536 
537 	struct mutex		sysfs_lock;
538 	struct mutex		sysfs_dir_lock;
539 
540 	/*
541 	 * for reusing dead hctx instances when updating
542 	 * nr_hw_queues
543 	 */
544 	struct list_head	unused_hctx_list;
545 	spinlock_t		unused_hctx_lock;
546 
547 	int			mq_freeze_depth;
548 
549 #if defined(CONFIG_BLK_DEV_BSG)
550 	struct bsg_class_device bsg_dev;
551 #endif
552 
553 #ifdef CONFIG_BLK_DEV_THROTTLING
554 	/* Throttle data */
555 	struct throtl_data *td;
556 #endif
557 	struct rcu_head		rcu_head;
558 	wait_queue_head_t	mq_freeze_wq;
559 	/*
560 	 * Protect concurrent access to q_usage_counter by
561 	 * percpu_ref_kill() and percpu_ref_reinit().
562 	 */
563 	struct mutex		mq_freeze_lock;
564 	struct percpu_ref	q_usage_counter;
565 
566 	struct blk_mq_tag_set	*tag_set;
567 	struct list_head	tag_set_list;
568 	struct bio_set		bio_split;
569 
570 	struct dentry		*debugfs_dir;
571 
572 #ifdef CONFIG_BLK_DEBUG_FS
573 	struct dentry		*sched_debugfs_dir;
574 	struct dentry		*rqos_debugfs_dir;
575 #endif
576 
577 	bool			mq_sysfs_init_done;
578 
579 	size_t			cmd_size;
580 
581 #define BLK_MAX_WRITE_HINTS	5
582 	u64			write_hints[BLK_MAX_WRITE_HINTS];
583 };
584 
585 /* Keep blk_queue_flag_name[] in sync with the definitions below */
586 #define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
587 #define QUEUE_FLAG_DYING	1	/* queue being torn down */
588 #define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
589 #define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
590 #define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
591 #define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
592 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
593 #define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
594 #define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
595 #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
596 #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
597 #define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
598 #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
599 #define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
600 #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
601 #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
602 #define QUEUE_FLAG_WC		17	/* Write back caching */
603 #define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
604 #define QUEUE_FLAG_DAX		19	/* device supports DAX */
605 #define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
606 #define QUEUE_FLAG_POLL_STATS	21	/* collecting stats for hybrid polling */
607 #define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
608 #define QUEUE_FLAG_SCSI_PASSTHROUGH 23	/* queue supports SCSI commands */
609 #define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
610 #define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
611 #define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
612 #define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
613 
614 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
615 				 (1 << QUEUE_FLAG_SAME_COMP))
616 
617 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
618 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
619 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
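
/*
 * Editor's note (not in the original header): drivers advertise capabilities
 * by setting these flags on their queue, typically at probe time, e.g.:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);	// no seek penalty (SSD)
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);	// don't feed entropy pool
 */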
620 
621 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
622 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
623 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
624 #define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
625 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
626 #define blk_queue_noxmerges(q)	\
627 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
628 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
629 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
630 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
631 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
632 #define blk_queue_zone_resetall(q)	\
633 	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
634 #define blk_queue_secure_erase(q) \
635 	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
636 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
637 #define blk_queue_scsi_passthrough(q)	\
638 	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
639 #define blk_queue_pci_p2pdma(q)	\
640 	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
641 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
642 #define blk_queue_rq_alloc_time(q)	\
643 	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
644 #else
645 #define blk_queue_rq_alloc_time(q)	false
646 #endif
647 
648 #define blk_noretry_request(rq) \
649 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
650 			     REQ_FAILFAST_DRIVER))
651 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
652 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
653 #define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
654 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
655 
656 extern void blk_set_pm_only(struct request_queue *q);
657 extern void blk_clear_pm_only(struct request_queue *q);
658 
659 static inline bool blk_account_rq(struct request *rq)
660 {
661 	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
662 }
663 
664 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
665 
666 #define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
667 
668 #define rq_dma_dir(rq) \
669 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
670 
671 #define dma_map_bvec(dev, bv, dir, attrs) \
672 	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
673 	(dir), (attrs))
674 
675 static inline bool queue_is_mq(struct request_queue *q)
676 {
677 	return q->mq_ops;
678 }
679 
680 static inline enum blk_zoned_model
681 blk_queue_zoned_model(struct request_queue *q)
682 {
683 	return q->limits.zoned;
684 }
685 
686 static inline bool blk_queue_is_zoned(struct request_queue *q)
687 {
688 	switch (blk_queue_zoned_model(q)) {
689 	case BLK_ZONED_HA:
690 	case BLK_ZONED_HM:
691 		return true;
692 	default:
693 		return false;
694 	}
695 }
696 
697 static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
698 {
699 	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
700 }
701 
702 #ifdef CONFIG_BLK_DEV_ZONED
703 static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
704 {
705 	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
706 }
707 
708 static inline unsigned int blk_queue_zone_no(struct request_queue *q,
709 					     sector_t sector)
710 {
711 	if (!blk_queue_is_zoned(q))
712 		return 0;
713 	return sector >> ilog2(q->limits.chunk_sectors);
714 }
715 
716 static inline bool blk_queue_zone_is_seq(struct request_queue *q,
717 					 sector_t sector)
718 {
719 	if (!blk_queue_is_zoned(q))
720 		return false;
721 	if (!q->conv_zones_bitmap)
722 		return true;
723 	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
724 }
725 #else /* CONFIG_BLK_DEV_ZONED */
726 static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
727 {
728 	return 0;
729 }
730 static inline bool blk_queue_zone_is_seq(struct request_queue *q,
731 					 sector_t sector)
732 {
733 	return false;
734 }
735 static inline unsigned int blk_queue_zone_no(struct request_queue *q,
736 					     sector_t sector)
737 {
738 	return 0;
739 }
740 #endif /* CONFIG_BLK_DEV_ZONED */
741 
742 static inline bool rq_is_sync(struct request *rq)
743 {
744 	return op_is_sync(rq->cmd_flags);
745 }
746 
747 static inline bool rq_mergeable(struct request *rq)
748 {
749 	if (blk_rq_is_passthrough(rq))
750 		return false;
751 
752 	if (req_op(rq) == REQ_OP_FLUSH)
753 		return false;
754 
755 	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
756 		return false;
757 
758 	if (req_op(rq) == REQ_OP_ZONE_APPEND)
759 		return false;
760 
761 	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
762 		return false;
763 	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
764 		return false;
765 
766 	return true;
767 }
768 
769 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
770 {
771 	if (bio_page(a) == bio_page(b) &&
772 	    bio_offset(a) == bio_offset(b))
773 		return true;
774 
775 	return false;
776 }
777 
778 static inline unsigned int blk_queue_depth(struct request_queue *q)
779 {
780 	if (q->queue_depth)
781 		return q->queue_depth;
782 
783 	return q->nr_requests;
784 }
785 
786 extern unsigned long blk_max_low_pfn, blk_max_pfn;
787 
788 /*
789  * standard bounce addresses:
790  *
791  * BLK_BOUNCE_HIGH	: bounce all highmem pages
792  * BLK_BOUNCE_ANY	: don't bounce anything
793  * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
794  */
795 
796 #if BITS_PER_LONG == 32
797 #define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
798 #else
799 #define BLK_BOUNCE_HIGH		-1ULL
800 #endif
801 #define BLK_BOUNCE_ANY		(-1ULL)
802 #define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
803 
804 /*
805  * default timeout for SG_IO if none specified
806  */
807 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
808 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
809 
810 struct rq_map_data {
811 	struct page **pages;
812 	int page_order;
813 	int nr_entries;
814 	unsigned long offset;
815 	int null_mapped;
816 	int from_user;
817 };
818 
819 struct req_iterator {
820 	struct bvec_iter iter;
821 	struct bio *bio;
822 };
823 
824 /* This should not be used directly - use rq_for_each_segment */
825 #define for_each_bio(_bio)		\
826 	for (; _bio; _bio = _bio->bi_next)
827 #define __rq_for_each_bio(_bio, rq)	\
828 	if ((rq->bio))			\
829 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
830 
831 #define rq_for_each_segment(bvl, _rq, _iter)			\
832 	__rq_for_each_bio(_iter.bio, _rq)			\
833 		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
834 
835 #define rq_for_each_bvec(bvl, _rq, _iter)			\
836 	__rq_for_each_bio(_iter.bio, _rq)			\
837 		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
838 
839 #define rq_iter_last(bvec, _iter)				\
840 		(_iter.bio->bi_next == NULL &&			\
841 		 bio_iter_last(bvec, _iter.iter))
842 
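/*
 * Editor's sketch (not in the original header): a driver that processes a
 * request segment by segment would typically use rq_for_each_segment() with
 * a struct req_iterator on the stack; mydrv_copy_bvec() is hypothetical.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		// assuming a lowmem page; highmem would need a kmap variant
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *		mydrv_copy_bvec(buf, bvec.bv_len);
 *	}
 */
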
843 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
844 # error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
845 #endif
846 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
847 extern void rq_flush_dcache_pages(struct request *rq);
848 #else
849 static inline void rq_flush_dcache_pages(struct request *rq)
850 {
851 }
852 #endif
853 
854 extern int blk_register_queue(struct gendisk *disk);
855 extern void blk_unregister_queue(struct gendisk *disk);
856 blk_qc_t submit_bio_noacct(struct bio *bio);
857 extern void blk_rq_init(struct request_queue *q, struct request *rq);
858 extern void blk_put_request(struct request *);
859 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
860 				       blk_mq_req_flags_t flags);
861 extern int blk_lld_busy(struct request_queue *q);
862 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
863 			     struct bio_set *bs, gfp_t gfp_mask,
864 			     int (*bio_ctr)(struct bio *, struct bio *, void *),
865 			     void *data);
866 extern void blk_rq_unprep_clone(struct request *rq);
867 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
868 				     struct request *rq);
869 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
870 extern void blk_queue_split(struct bio **);
871 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
872 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
873 			      unsigned int, void __user *);
874 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
875 			  unsigned int, void __user *);
876 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
877 			 struct scsi_ioctl_command __user *);
878 extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
879 extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
880 
881 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
882 extern void blk_queue_exit(struct request_queue *q);
883 extern void blk_sync_queue(struct request_queue *q);
884 extern int blk_rq_map_user(struct request_queue *, struct request *,
885 			   struct rq_map_data *, void __user *, unsigned long,
886 			   gfp_t);
887 extern int blk_rq_unmap_user(struct bio *);
888 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
889 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
890 			       struct rq_map_data *, const struct iov_iter *,
891 			       gfp_t);
892 extern void blk_execute_rq(struct request_queue *, struct gendisk *,
893 			  struct request *, int);
894 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
895 				  struct request *, int, rq_end_io_fn *);
896 
897 /* Helper to convert REQ_OP_XXX to its string format XXX */
898 extern const char *blk_op_str(unsigned int op);
899 
900 int blk_status_to_errno(blk_status_t status);
901 blk_status_t errno_to_blk_status(int errno);
902 
903 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
904 
905 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
906 {
907 	return bdev->bd_disk->queue;	/* this is never NULL */
908 }
909 
910 /*
911  * The basic unit of block I/O is a sector. It is used in a number of contexts
912  * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
913  * bytes. Variables of type sector_t represent an offset or size that is a
914  * multiple of 512 bytes. Hence these two constants.
915  */
916 #ifndef SECTOR_SHIFT
917 #define SECTOR_SHIFT 9
918 #endif
919 #ifndef SECTOR_SIZE
920 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
921 #endif
922 
923 /*
924  * blk_rq_pos()			: the current sector
925  * blk_rq_bytes()		: bytes left in the entire request
926  * blk_rq_cur_bytes()		: bytes left in the current segment
927  * blk_rq_err_bytes()		: bytes left till the next error boundary
928  * blk_rq_sectors()		: sectors left in the entire request
929  * blk_rq_cur_sectors()		: sectors left in the current segment
930  * blk_rq_stats_sectors()	: sectors of the entire request used for stats
931  */
932 static inline sector_t blk_rq_pos(const struct request *rq)
933 {
934 	return rq->__sector;
935 }
936 
937 static inline unsigned int blk_rq_bytes(const struct request *rq)
938 {
939 	return rq->__data_len;
940 }
941 
942 static inline int blk_rq_cur_bytes(const struct request *rq)
943 {
944 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
945 }
946 
947 extern unsigned int blk_rq_err_bytes(const struct request *rq);
948 
949 static inline unsigned int blk_rq_sectors(const struct request *rq)
950 {
951 	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
952 }
953 
954 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
955 {
956 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
957 }
958 
959 static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
960 {
961 	return rq->stats_sectors;
962 }
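
/*
 * Editor's example (not in the original header), illustrating the helpers
 * above: for a READ request starting at sector 2048 with 32 KiB left in
 * total and 4 KiB left in the current bio segment:
 *
 *	blk_rq_pos(rq)         == 2048
 *	blk_rq_bytes(rq)       == 32768
 *	blk_rq_sectors(rq)     == 32768 >> SECTOR_SHIFT == 64
 *	blk_rq_cur_bytes(rq)   == 4096
 *	blk_rq_cur_sectors(rq) == 4096 >> SECTOR_SHIFT == 8
 */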
963 
964 #ifdef CONFIG_BLK_DEV_ZONED
965 
966 /* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
967 const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
968 
969 static inline unsigned int blk_rq_zone_no(struct request *rq)
970 {
971 	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
972 }
973 
974 static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
975 {
976 	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
977 }
978 #endif /* CONFIG_BLK_DEV_ZONED */
979 
980 /*
981  * Some commands like WRITE SAME have a payload or data transfer size which
982  * is different from the size of the request.  Any driver that supports such
983  * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
984  * calculate the data transfer size.
985  */
986 static inline unsigned int blk_rq_payload_bytes(struct request *rq)
987 {
988 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
989 		return rq->special_vec.bv_len;
990 	return blk_rq_bytes(rq);
991 }
992 
993 /*
994  * Return the first full biovec in the request.  The caller needs to check
995  * that the request has at least one bvec before calling this helper.
996  */
997 static inline struct bio_vec req_bvec(struct request *rq)
998 {
999 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1000 		return rq->special_vec;
1001 	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1002 }
1003 
1004 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1005 						     int op)
1006 {
1007 	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
1008 		return min(q->limits.max_discard_sectors,
1009 			   UINT_MAX >> SECTOR_SHIFT);
1010 
1011 	if (unlikely(op == REQ_OP_WRITE_SAME))
1012 		return q->limits.max_write_same_sectors;
1013 
1014 	if (unlikely(op == REQ_OP_WRITE_ZEROES))
1015 		return q->limits.max_write_zeroes_sectors;
1016 
1017 	return q->limits.max_sectors;
1018 }
1019 
1020 /*
1021  * Return maximum size of a request at given offset. Only valid for
1022  * file system requests.
1023  */
1024 static inline unsigned int blk_max_size_offset(struct request_queue *q,
1025 					       sector_t offset)
1026 {
1027 	if (!q->limits.chunk_sectors)
1028 		return q->limits.max_sectors;
1029 
1030 	return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
1031 			(offset & (q->limits.chunk_sectors - 1))));
1032 }
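
/*
 * Editor's example (not in the original header): with max_sectors = 256 and
 * chunk_sectors = 128, an I/O starting at offset 200 may span at most
 * 128 - (200 & 127) = 128 - 72 = 56 sectors, so that it does not cross the
 * chunk boundary at sector 256.
 */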
1033 
1034 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1035 						  sector_t offset)
1036 {
1037 	struct request_queue *q = rq->q;
1038 
1039 	if (blk_rq_is_passthrough(rq))
1040 		return q->limits.max_hw_sectors;
1041 
1042 	if (!q->limits.chunk_sectors ||
1043 	    req_op(rq) == REQ_OP_DISCARD ||
1044 	    req_op(rq) == REQ_OP_SECURE_ERASE)
1045 		return blk_queue_get_max_sectors(q, req_op(rq));
1046 
1047 	return min(blk_max_size_offset(q, offset),
1048 			blk_queue_get_max_sectors(q, req_op(rq)));
1049 }
1050 
1051 static inline unsigned int blk_rq_count_bios(struct request *rq)
1052 {
1053 	unsigned int nr_bios = 0;
1054 	struct bio *bio;
1055 
1056 	__rq_for_each_bio(bio, rq)
1057 		nr_bios++;
1058 
1059 	return nr_bios;
1060 }
1061 
1062 void blk_steal_bios(struct bio_list *list, struct request *rq);
1063 
1064 /*
1065  * Request completion related functions.
1066  *
1067  * blk_update_request() completes given number of bytes and updates
1068  * the request without completing it.
1069  */
1070 extern bool blk_update_request(struct request *rq, blk_status_t error,
1071 			       unsigned int nr_bytes);
1072 
1073 extern void blk_abort_request(struct request *);
1074 
1075 /*
1076  * Access functions for manipulating queue properties
1077  */
1078 extern void blk_cleanup_queue(struct request_queue *);
1079 extern void blk_queue_bounce_limit(struct request_queue *, u64);
1080 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1081 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1082 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1083 extern void blk_queue_max_discard_segments(struct request_queue *,
1084 		unsigned short);
1085 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1086 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1087 		unsigned int max_discard_sectors);
1088 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1089 		unsigned int max_write_same_sectors);
1090 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1091 		unsigned int max_write_same_sectors);
1092 extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
1093 extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
1094 		unsigned int max_zone_append_sectors);
1095 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1096 extern void blk_queue_alignment_offset(struct request_queue *q,
1097 				       unsigned int alignment);
1098 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1099 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1100 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1101 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1102 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1103 extern void blk_set_default_limits(struct queue_limits *lim);
1104 extern void blk_set_stacking_limits(struct queue_limits *lim);
1105 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1106 			    sector_t offset);
1107 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1108 			    sector_t offset);
1109 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1110 			      sector_t offset);
1111 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1112 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1113 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1114 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1115 extern void blk_queue_dma_alignment(struct request_queue *, int);
1116 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1117 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1118 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1119 extern void blk_queue_required_elevator_features(struct request_queue *q,
1120 						 unsigned int features);
1121 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1122 					      struct device *dev);
1123 
1124 /*
1125  * Number of physical segments as sent to the device.
1126  *
1127  * Normally this is the number of discontiguous data segments sent by the
1128  * submitter.  But for a data-less command like discard we might have no
1129  * actual data segments submitted, but the driver might have to add its
1130  * own special payload.  In that case we still return 1 here so that this
1131  * special payload will be mapped.
1132  */
1133 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1134 {
1135 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1136 		return 1;
1137 	return rq->nr_phys_segments;
1138 }
1139 
1140 /*
1141  * Number of discard segments (or ranges) the driver needs to fill in.
1142  * Each discard bio merged into a request is counted as one segment.
1143  */
1144 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1145 {
1146 	return max_t(unsigned short, rq->nr_phys_segments, 1);
1147 }
1148 
1149 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1150 		struct scatterlist *sglist, struct scatterlist **last_sg);
1151 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1152 		struct scatterlist *sglist)
1153 {
1154 	struct scatterlist *last_sg = NULL;
1155 
1156 	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1157 }
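
/*
 * Editor's sketch (not in the original header): a typical blk-mq driver maps
 * a request onto a preallocated scatterlist from its ->queue_rq() handler
 * before handing it to the DMA engine.  The "mydrv" names and MYDRV_MAX_SEGS
 * are hypothetical.
 *
 *	struct scatterlist sgl[MYDRV_MAX_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *	// nents entries of sgl[] now describe the request's data and can be
 *	// handed to dma_map_sg() / the hardware.
 */
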
1158 extern void blk_dump_rq_flags(struct request *, char *);
1159 
1160 bool __must_check blk_get_queue(struct request_queue *);
1161 struct request_queue *blk_alloc_queue(int node_id);
1162 extern void blk_put_queue(struct request_queue *);
1163 extern void blk_set_queue_dying(struct request_queue *);
1164 
1165 #ifdef CONFIG_BLOCK
1166 /*
1167  * blk_plug permits building a queue of related requests by holding the I/O
1168  * fragments for a short period. This allows merging of sequential requests
1169  * into a single larger request. As the requests are moved from a per-task
1170  * list to the device's request_queue in a batch, this results in improved
1171  * scalability because contention on the request_queue lock is reduced.
1172  *
1173  * It is ok not to disable preemption when adding the request to the plug list
1174  * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1175  * the plug list when the task sleeps by itself. For details, please see
1176  * schedule() where blk_schedule_flush_plug() is called.
1177  */
1178 struct blk_plug {
1179 	struct list_head mq_list; /* blk-mq requests */
1180 	struct list_head cb_list; /* md requires an unplug callback */
1181 	unsigned short rq_count;
1182 	bool multiple_queues;
1183 	bool nowait;
1184 };
1185 #define BLK_MAX_REQUEST_COUNT 16
1186 #define BLK_PLUG_FLUSH_SIZE (128 * 1024)
1187 
1188 struct blk_plug_cb;
1189 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1190 struct blk_plug_cb {
1191 	struct list_head list;
1192 	blk_plug_cb_fn callback;
1193 	void *data;
1194 };
1195 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1196 					     void *data, int size);
1197 extern void blk_start_plug(struct blk_plug *);
1198 extern void blk_finish_plug(struct blk_plug *);
1199 extern void blk_flush_plug_list(struct blk_plug *, bool);
1200 
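/*
 * Editor's sketch (not in the original header): a submitter that issues a
 * batch of bios can hold them in an on-stack plug so the block layer gets a
 * chance to merge them; submit_bio() is not declared in this header.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */
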
1201 static inline void blk_flush_plug(struct task_struct *tsk)
1202 {
1203 	struct blk_plug *plug = tsk->plug;
1204 
1205 	if (plug)
1206 		blk_flush_plug_list(plug, false);
1207 }
1208 
1209 static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1210 {
1211 	struct blk_plug *plug = tsk->plug;
1212 
1213 	if (plug)
1214 		blk_flush_plug_list(plug, true);
1215 }
1216 
1217 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1218 {
1219 	struct blk_plug *plug = tsk->plug;
1220 
1221 	return plug &&
1222 		 (!list_empty(&plug->mq_list) ||
1223 		 !list_empty(&plug->cb_list));
1224 }
1225 
1226 int blkdev_issue_flush(struct block_device *, gfp_t);
1227 long nr_blockdev_pages(void);
1228 #else /* CONFIG_BLOCK */
1229 struct blk_plug {
1230 };
1231 
1232 static inline void blk_start_plug(struct blk_plug *plug)
1233 {
1234 }
1235 
1236 static inline void blk_finish_plug(struct blk_plug *plug)
1237 {
1238 }
1239 
1240 static inline void blk_flush_plug(struct task_struct *task)
1241 {
1242 }
1243 
1244 static inline void blk_schedule_flush_plug(struct task_struct *task)
1245 {
1246 }
1247 
1248 
1249 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1250 {
1251 	return false;
1252 }
1253 
1254 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
1255 {
1256 	return 0;
1257 }
1258 
1259 static inline long nr_blockdev_pages(void)
1260 {
1261 	return 0;
1262 }
1263 #endif /* CONFIG_BLOCK */
1264 
1265 extern void blk_io_schedule(void);
1266 
1267 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1268 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1269 
1270 #define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
1271 
1272 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1273 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1274 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1275 		sector_t nr_sects, gfp_t gfp_mask, int flags,
1276 		struct bio **biop);
1277 
1278 #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
1279 #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
1280 
1281 extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1282 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1283 		unsigned flags);
1284 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1285 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
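
/*
 * Editor's sketch (not in the original header): a filesystem zeroing a range
 * of its block device might call, from process context:
 *
 *	// zero 1 MiB (2048 sectors of 512 bytes) starting at sector 2048,
 *	// allowing the device to unmap the range
 *	err = blkdev_issue_zeroout(bdev, 2048, 2048, GFP_KERNEL, 0);
 *
 *	// same range, but keep the blocks provisioned
 *	err = blkdev_issue_zeroout(bdev, 2048, 2048, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOUNMAP);
 */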
1286 
1287 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1288 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1289 {
1290 	return blkdev_issue_discard(sb->s_bdev,
1291 				    block << (sb->s_blocksize_bits -
1292 					      SECTOR_SHIFT),
1293 				    nr_blocks << (sb->s_blocksize_bits -
1294 						  SECTOR_SHIFT),
1295 				    gfp_mask, flags);
1296 }
1297 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1298 		sector_t nr_blocks, gfp_t gfp_mask)
1299 {
1300 	return blkdev_issue_zeroout(sb->s_bdev,
1301 				    block << (sb->s_blocksize_bits -
1302 					      SECTOR_SHIFT),
1303 				    nr_blocks << (sb->s_blocksize_bits -
1304 						  SECTOR_SHIFT),
1305 				    gfp_mask, 0);
1306 }
1307 
1308 extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
1309 
1310 enum blk_default_limits {
1311 	BLK_MAX_SEGMENTS	= 128,
1312 	BLK_SAFE_MAX_SECTORS	= 255,
1313 	BLK_DEF_MAX_SECTORS	= 2560,
1314 	BLK_MAX_SEGMENT_SIZE	= 65536,
1315 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1316 };
1317 
1318 static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1319 {
1320 	return q->limits.seg_boundary_mask;
1321 }
1322 
1323 static inline unsigned long queue_virt_boundary(const struct request_queue *q)
1324 {
1325 	return q->limits.virt_boundary_mask;
1326 }
1327 
1328 static inline unsigned int queue_max_sectors(const struct request_queue *q)
1329 {
1330 	return q->limits.max_sectors;
1331 }
1332 
1333 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1334 {
1335 	return q->limits.max_hw_sectors;
1336 }
1337 
1338 static inline unsigned short queue_max_segments(const struct request_queue *q)
1339 {
1340 	return q->limits.max_segments;
1341 }
1342 
1343 static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
1344 {
1345 	return q->limits.max_discard_segments;
1346 }
1347 
1348 static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1349 {
1350 	return q->limits.max_segment_size;
1351 }
1352 
1353 static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
1354 {
1355 	return q->limits.max_zone_append_sectors;
1356 }
1357 
1358 static inline unsigned queue_logical_block_size(const struct request_queue *q)
1359 {
1360 	int retval = 512;
1361 
1362 	if (q && q->limits.logical_block_size)
1363 		retval = q->limits.logical_block_size;
1364 
1365 	return retval;
1366 }
1367 
1368 static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
1369 {
1370 	return queue_logical_block_size(bdev_get_queue(bdev));
1371 }
1372 
1373 static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1374 {
1375 	return q->limits.physical_block_size;
1376 }
1377 
1378 static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1379 {
1380 	return queue_physical_block_size(bdev_get_queue(bdev));
1381 }
1382 
1383 static inline unsigned int queue_io_min(const struct request_queue *q)
1384 {
1385 	return q->limits.io_min;
1386 }
1387 
1388 static inline int bdev_io_min(struct block_device *bdev)
1389 {
1390 	return queue_io_min(bdev_get_queue(bdev));
1391 }
1392 
1393 static inline unsigned int queue_io_opt(const struct request_queue *q)
1394 {
1395 	return q->limits.io_opt;
1396 }
1397 
1398 static inline int bdev_io_opt(struct block_device *bdev)
1399 {
1400 	return queue_io_opt(bdev_get_queue(bdev));
1401 }
1402 
1403 static inline int queue_alignment_offset(const struct request_queue *q)
1404 {
1405 	if (q->limits.misaligned)
1406 		return -1;
1407 
1408 	return q->limits.alignment_offset;
1409 }
1410 
1411 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1412 {
1413 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1414 	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
1415 		<< SECTOR_SHIFT;
1416 
1417 	return (granularity + lim->alignment_offset - alignment) % granularity;
1418 }
1419 
1420 static inline int bdev_alignment_offset(struct block_device *bdev)
1421 {
1422 	struct request_queue *q = bdev_get_queue(bdev);
1423 
1424 	if (q->limits.misaligned)
1425 		return -1;
1426 
1427 	if (bdev != bdev->bd_contains)
1428 		return bdev->bd_part->alignment_offset;
1429 
1430 	return q->limits.alignment_offset;
1431 }
1432 
1433 static inline int queue_discard_alignment(const struct request_queue *q)
1434 {
1435 	if (q->limits.discard_misaligned)
1436 		return -1;
1437 
1438 	return q->limits.discard_alignment;
1439 }
1440 
1441 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1442 {
1443 	unsigned int alignment, granularity, offset;
1444 
1445 	if (!lim->max_discard_sectors)
1446 		return 0;
1447 
1448 	/* Why are these in bytes, not sectors? */
1449 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
1450 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
1451 	if (!granularity)
1452 		return 0;
1453 
1454 	/* Offset of the partition start in 'granularity' sectors */
1455 	offset = sector_div(sector, granularity);
1456 
1457 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
1458 	offset = (granularity + alignment - offset) % granularity;
1459 
1460 	/* Turn it back into bytes, gaah */
1461 	return offset << SECTOR_SHIFT;
1462 }
1463 
1464 static inline int bdev_discard_alignment(struct block_device *bdev)
1465 {
1466 	struct request_queue *q = bdev_get_queue(bdev);
1467 
1468 	if (bdev != bdev->bd_contains)
1469 		return bdev->bd_part->discard_alignment;
1470 
1471 	return q->limits.discard_alignment;
1472 }
1473 
1474 static inline unsigned int bdev_write_same(struct block_device *bdev)
1475 {
1476 	struct request_queue *q = bdev_get_queue(bdev);
1477 
1478 	if (q)
1479 		return q->limits.max_write_same_sectors;
1480 
1481 	return 0;
1482 }
1483 
1484 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1485 {
1486 	struct request_queue *q = bdev_get_queue(bdev);
1487 
1488 	if (q)
1489 		return q->limits.max_write_zeroes_sectors;
1490 
1491 	return 0;
1492 }
1493 
1494 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1495 {
1496 	struct request_queue *q = bdev_get_queue(bdev);
1497 
1498 	if (q)
1499 		return blk_queue_zoned_model(q);
1500 
1501 	return BLK_ZONED_NONE;
1502 }
1503 
1504 static inline bool bdev_is_zoned(struct block_device *bdev)
1505 {
1506 	struct request_queue *q = bdev_get_queue(bdev);
1507 
1508 	if (q)
1509 		return blk_queue_is_zoned(q);
1510 
1511 	return false;
1512 }
1513 
1514 static inline sector_t bdev_zone_sectors(struct block_device *bdev)
1515 {
1516 	struct request_queue *q = bdev_get_queue(bdev);
1517 
1518 	if (q)
1519 		return blk_queue_zone_sectors(q);
1520 	return 0;
1521 }
1522 
1523 static inline int queue_dma_alignment(const struct request_queue *q)
1524 {
1525 	return q ? q->dma_alignment : 511;
1526 }
1527 
1528 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1529 				 unsigned int len)
1530 {
1531 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1532 	return !(addr & alignment) && !(len & alignment);
1533 }
1534 
1535 /* assumes size > 256 */
1536 static inline unsigned int blksize_bits(unsigned int size)
1537 {
1538 	unsigned int bits = 8;
1539 	do {
1540 		bits++;
1541 		size >>= 1;
1542 	} while (size > 256);
1543 	return bits;
1544 }
1545 
1546 static inline unsigned int block_size(struct block_device *bdev)
1547 {
1548 	return 1 << bdev->bd_inode->i_blkbits;
1549 }
1550 
1551 int kblockd_schedule_work(struct work_struct *work);
1552 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1553 
1554 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1555 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1556 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1557 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
1558 
1559 #if defined(CONFIG_BLK_DEV_INTEGRITY)
1560 
1561 enum blk_integrity_flags {
1562 	BLK_INTEGRITY_VERIFY		= 1 << 0,
1563 	BLK_INTEGRITY_GENERATE		= 1 << 1,
1564 	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
1565 	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
1566 };
1567 
1568 struct blk_integrity_iter {
1569 	void			*prot_buf;
1570 	void			*data_buf;
1571 	sector_t		seed;
1572 	unsigned int		data_size;
1573 	unsigned short		interval;
1574 	const char		*disk_name;
1575 };
1576 
1577 typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
1578 typedef void (integrity_prepare_fn) (struct request *);
1579 typedef void (integrity_complete_fn) (struct request *, unsigned int);
1580 
1581 struct blk_integrity_profile {
1582 	integrity_processing_fn		*generate_fn;
1583 	integrity_processing_fn		*verify_fn;
1584 	integrity_prepare_fn		*prepare_fn;
1585 	integrity_complete_fn		*complete_fn;
1586 	const char			*name;
1587 };
1588 
1589 extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
1590 extern void blk_integrity_unregister(struct gendisk *);
1591 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1592 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1593 				   struct scatterlist *);
1594 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1595 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1596 				   struct request *);
1597 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1598 				    struct bio *);
1599 
1600 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1601 {
1602 	struct blk_integrity *bi = &disk->queue->integrity;
1603 
1604 	if (!bi->profile)
1605 		return NULL;
1606 
1607 	return bi;
1608 }
1609 
1610 static inline
1611 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1612 {
1613 	return blk_get_integrity(bdev->bd_disk);
1614 }
1615 
1616 static inline bool
1617 blk_integrity_queue_supports_integrity(struct request_queue *q)
1618 {
1619 	return q->integrity.profile;
1620 }
1621 
1622 static inline bool blk_integrity_rq(struct request *rq)
1623 {
1624 	return rq->cmd_flags & REQ_INTEGRITY;
1625 }
1626 
1627 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1628 						    unsigned int segs)
1629 {
1630 	q->limits.max_integrity_segments = segs;
1631 }
1632 
1633 static inline unsigned short
1634 queue_max_integrity_segments(const struct request_queue *q)
1635 {
1636 	return q->limits.max_integrity_segments;
1637 }
1638 
1639 /**
1640  * bio_integrity_intervals - Return number of integrity intervals for a bio
1641  * @bi:		blk_integrity profile for device
1642  * @sectors:	Size of the bio in 512-byte sectors
1643  *
1644  * Description: The block layer calculates everything in 512 byte
1645  * sectors but integrity metadata is done in terms of the data integrity
1646  * interval size of the storage device.  Convert the block layer sectors
1647  * to the appropriate number of integrity intervals.
1648  */
1649 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1650 						   unsigned int sectors)
1651 {
1652 	return sectors >> (bi->interval_exp - 9);
1653 }
1654 
1655 static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1656 					       unsigned int sectors)
1657 {
1658 	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
1659 }
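
/*
 * Editor's example (not in the original header): for a device with a 4096
 * byte integrity interval (bi->interval_exp = 12) and an 8 byte tuple_size,
 * a 32 KiB bio covers 64 sectors, so:
 *
 *	bio_integrity_intervals(bi, 64) == 64 >> (12 - 9) == 8
 *	bio_integrity_bytes(bi, 64)     == 8 * 8          == 64 bytes
 */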
1660 
1661 /*
1662  * Return the first bvec that contains integrity data.  Only drivers that are
1663  * limited to a single integrity segment should use this helper.
1664  */
1665 static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1666 {
1667 	if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
1668 		return NULL;
1669 	return rq->bio->bi_integrity->bip_vec;
1670 }
1671 
1672 #else /* CONFIG_BLK_DEV_INTEGRITY */
1673 
1674 struct bio;
1675 struct block_device;
1676 struct gendisk;
1677 struct blk_integrity;
1678 
1679 static inline int blk_integrity_rq(struct request *rq)
1680 {
1681 	return 0;
1682 }
1683 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1684 					    struct bio *b)
1685 {
1686 	return 0;
1687 }
1688 static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1689 					  struct bio *b,
1690 					  struct scatterlist *s)
1691 {
1692 	return 0;
1693 }
1694 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1695 {
1696 	return NULL;
1697 }
1698 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1699 {
1700 	return NULL;
1701 }
1702 static inline bool
1703 blk_integrity_queue_supports_integrity(struct request_queue *q)
1704 {
1705 	return false;
1706 }
1707 static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1708 {
1709 	return 0;
1710 }
1711 static inline void blk_integrity_register(struct gendisk *d,
1712 					 struct blk_integrity *b)
1713 {
1714 }
1715 static inline void blk_integrity_unregister(struct gendisk *d)
1716 {
1717 }
1718 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1719 						    unsigned int segs)
1720 {
1721 }
1722 static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
1723 {
1724 	return 0;
1725 }
1726 static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1727 					  struct request *r1,
1728 					  struct request *r2)
1729 {
1730 	return true;
1731 }
1732 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1733 					   struct request *r,
1734 					   struct bio *b)
1735 {
1736 	return true;
1737 }
1738 
1739 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1740 						   unsigned int sectors)
1741 {
1742 	return 0;
1743 }
1744 
1745 static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1746 					       unsigned int sectors)
1747 {
1748 	return 0;
1749 }
1750 
1751 static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1752 {
1753 	return NULL;
1754 }
1755 
1756 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1757 
1758 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1759 
1760 bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1761 
1762 void blk_ksm_unregister(struct request_queue *q);
1763 
1764 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1765 
1766 static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
1767 				    struct request_queue *q)
1768 {
1769 	return true;
1770 }
1771 
1772 static inline void blk_ksm_unregister(struct request_queue *q) { }
1773 
1774 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
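
/*
 * Illustrative usage, not part of this header: a driver with inline
 * encryption hardware sets up a blk_keyslot_manager (see
 * <linux/keyslot-manager.h>) and attaches it to its request queue.
 * blk_ksm_register() returns false when the keyslot manager cannot be
 * attached, and the stub above unconditionally reports success when the
 * feature is compiled out.  "my_ksm" and "my_queue" are hypothetical names.
 *
 *	if (!blk_ksm_register(&my_ksm, my_queue))
 *		pr_warn("inline crypto support not usable\n");
 *
 *	blk_ksm_unregister(my_queue);
 */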
1775 
1776 
1777 struct block_device_operations {
1778 	blk_qc_t (*submit_bio) (struct bio *bio);
1779 	int (*open) (struct block_device *, fmode_t);
1780 	void (*release) (struct gendisk *, fmode_t);
1781 	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
1782 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1783 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1784 	unsigned int (*check_events) (struct gendisk *disk,
1785 				      unsigned int clearing);
1786 	void (*unlock_native_capacity) (struct gendisk *);
1787 	int (*revalidate_disk) (struct gendisk *);
1788 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1789 	/* this callback is called with swap_lock and sometimes the page table lock held */
1790 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1791 	int (*report_zones)(struct gendisk *, sector_t sector,
1792 			unsigned int nr_zones, report_zones_cb cb, void *data);
1793 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
1794 	struct module *owner;
1795 	const struct pr_ops *pr_ops;
1796 };
1797 
1798 #ifdef CONFIG_COMPAT
1799 extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
1800 				      unsigned int, unsigned long);
1801 #else
1802 #define blkdev_compat_ptr_ioctl NULL
1803 #endif
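
/*
 * Illustrative usage, not part of this header: a minimal bio-based driver
 * typically fills in only the operations it needs and can reuse
 * blkdev_compat_ptr_ioctl() for 32-bit ioctl translation.  All "my_*" names
 * are hypothetical.
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.compat_ioctl	= blkdev_compat_ptr_ioctl,
 *		.getgeo		= my_getgeo,
 *	};
 */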
1804 
1805 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1806 				 unsigned long);
1807 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1808 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1809 						struct writeback_control *);
1810 
1811 #ifdef CONFIG_BLK_DEV_ZONED
1812 bool blk_req_needs_zone_write_lock(struct request *rq);
1813 bool blk_req_zone_write_trylock(struct request *rq);
1814 void __blk_req_zone_write_lock(struct request *rq);
1815 void __blk_req_zone_write_unlock(struct request *rq);
1816 
1817 static inline void blk_req_zone_write_lock(struct request *rq)
1818 {
1819 	if (blk_req_needs_zone_write_lock(rq))
1820 		__blk_req_zone_write_lock(rq);
1821 }
1822 
1823 static inline void blk_req_zone_write_unlock(struct request *rq)
1824 {
1825 	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1826 		__blk_req_zone_write_unlock(rq);
1827 }
1828 
1829 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1830 {
1831 	return rq->q->seq_zones_wlock &&
1832 		test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
1833 }
1834 
1835 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1836 {
1837 	if (!blk_req_needs_zone_write_lock(rq))
1838 		return true;
1839 	return !blk_req_zone_is_write_locked(rq);
1840 }
1841 #else
1842 static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1843 {
1844 	return false;
1845 }
1846 
1847 static inline void blk_req_zone_write_lock(struct request *rq)
1848 {
1849 }
1850 
1851 static inline void blk_req_zone_write_unlock(struct request *rq)
1852 {
1853 }
1854 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1855 {
1856 	return false;
1857 }
1858 
1859 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1860 {
1861 	return true;
1862 }
1863 #endif /* CONFIG_BLK_DEV_ZONED */
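
/*
 * Illustrative usage, not part of this header: an I/O scheduler or driver
 * dispatching writes to a zoned device can use the helpers above to keep at
 * most one write in flight per sequential zone, unlocking again when the
 * request completes or is requeued.  my_dispatch_one() is hypothetical.
 *
 *	static bool my_dispatch_one(struct request *rq)
 *	{
 *		if (!blk_req_can_dispatch_to_zone(rq))
 *			return false;
 *		blk_req_zone_write_lock(rq);
 *		return true;
 *	}
 *
 * followed by blk_req_zone_write_unlock(rq) on completion or requeue.
 */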
1864 
1865 static inline void blk_wake_io_task(struct task_struct *waiter)
1866 {
1867 	/*
1868 	 * If we're polling, the task itself is doing the completions. For
1869 	 * that case, we don't need to signal a wakeup, it's enough to just
1870 	 * mark us as RUNNING.
1871 	 */
1872 	if (waiter == current)
1873 		__set_current_state(TASK_RUNNING);
1874 	else
1875 		wake_up_process(waiter);
1876 }
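
/*
 * Illustrative usage, not part of this header: a synchronous submission path
 * that supports polling can record the submitting task (current) and have
 * its completion handler wake it through blk_wake_io_task(); a polling
 * submitter then only needs its state flipped back to TASK_RUNNING.  The
 * context structure and field names below are hypothetical.
 *
 *	static void my_sync_end_io(struct bio *bio)
 *	{
 *		struct my_sync_ctx *ctx = bio->bi_private;
 *
 *		WRITE_ONCE(ctx->done, true);
 *		blk_wake_io_task(ctx->waiter);
 *	}
 */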
1877 
1878 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1879 		unsigned int op);
1880 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1881 		unsigned long start_time);
1882 
1883 /**
1884  * bio_start_io_acct - start I/O accounting for bio-based drivers
1885  * @bio:	bio to start accounting for
1886  *
1887  * Returns the start time that should be passed back to bio_end_io_acct().
1888  */
1889 static inline unsigned long bio_start_io_acct(struct bio *bio)
1890 {
1891 	return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
1892 }
1893 
1894 /**
1895  * bio_end_io_acct - end I/O accounting for bio-based drivers
1896  * @bio:	bio to end accounting for
1897  * @start_time:	start time returned by bio_start_io_acct()
1898  */
1899 static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1900 {
1901 	disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
1902 }
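
/*
 * Illustrative usage, not part of this header: a bio-based driver takes the
 * start timestamp in its ->submit_bio handler and closes the accounting
 * window once the bio is done.  The "my_*" names are hypothetical, and a
 * real driver would normally stash the start time until asynchronous
 * completion rather than completing inline as shown here.
 *
 *	static blk_qc_t my_submit_bio(struct bio *bio)
 *	{
 *		unsigned long start = bio_start_io_acct(bio);
 *
 *		my_process_bio(bio);
 *
 *		bio_end_io_acct(bio, start);
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 */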
1903 
1904 int bdev_read_only(struct block_device *bdev);
1905 int set_blocksize(struct block_device *bdev, int size);
1906 
1907 const char *bdevname(struct block_device *bdev, char *buffer);
1908 struct block_device *lookup_bdev(const char *);
1909 
1910 void blkdev_show(struct seq_file *seqf, off_t offset);
1911 
1912 #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
1913 #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
1914 #ifdef CONFIG_BLOCK
1915 #define BLKDEV_MAJOR_MAX	512
1916 #else
1917 #define BLKDEV_MAJOR_MAX	0
1918 #endif
1919 
1920 int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
1921 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1922 		void *holder);
1923 struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
1924 int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
1925 		void *holder);
1926 void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
1927 		void *holder);
1928 void blkdev_put(struct block_device *bdev, fmode_t mode);
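
/*
 * Illustrative usage, not part of this header: kernel code that needs
 * exclusive access to a device named by path opens it with FMODE_EXCL and a
 * holder pointer identifying the claimant, and releases it by passing the
 * same mode back to blkdev_put().  "/dev/sdX" and "my_holder" are
 * placeholders.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/sdX",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				  my_holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */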
1929 
1930 struct block_device *I_BDEV(struct inode *inode);
1931 struct block_device *bdget(dev_t);
1932 struct block_device *bdgrab(struct block_device *bdev);
1933 void bdput(struct block_device *);
1934 
1935 #ifdef CONFIG_BLOCK
1936 void invalidate_bdev(struct block_device *bdev);
1937 int sync_blockdev(struct block_device *bdev);
1938 #else
1939 static inline void invalidate_bdev(struct block_device *bdev)
1940 {
1941 }
1942 static inline int sync_blockdev(struct block_device *bdev)
1943 {
1944 	return 0;
1945 }
1946 #endif
1947 int fsync_bdev(struct block_device *bdev);
1948 
1949 struct super_block *freeze_bdev(struct block_device *bdev);
1950 int thaw_bdev(struct block_device *bdev, struct super_block *sb);
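
/*
 * Illustrative usage, not part of this header: a snapshot or suspend path
 * can quiesce any filesystem mounted on the device around its critical
 * section and then thaw it with the superblock pointer that freeze_bdev()
 * returned (which may be NULL when nothing is mounted).
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	...
 *	thaw_bdev(bdev, sb);
 */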
1951 
1952 #endif /* _LINUX_BLKDEV_H */
1953