/* xref: /linux-6.15/include/linux/blkdev.h (revision ef99b2d3) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sbitmap.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_keyslot_manager;

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

static inline bool blk_op_is_passthrough(unsigned int op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

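/*
 * Hedged usage sketch (not part of the kernel API): counting the
 * sequential-write-required zones of a device via the report_zones_cb
 * interface declared above. The callback name and counting policy are
 * illustrative assumptions. Typical call:
 *	blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *			    example_count_seq_zones_cb, &nr_seq);
 */
static inline int example_count_seq_zones_cb(struct blk_zone *zone,
					     unsigned int idx, void *data)
{
	unsigned int *nr_seq = data;

	if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
		(*nr_seq)++;
	return 0;	/* returning 0 continues the iteration */
}
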
#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	/* Inline crypto capabilities */
	struct blk_keyslot_manager *ksm;
#endif

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of
	 * nr_zones bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue(). A minimal read-side sketch follows this
	 * structure definition.
	 */
	unsigned int		nr_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */

	int			node;
	struct mutex		debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		bio_split;

	struct dentry		*debugfs_dir;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];
};
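
/*
 * Hedged sketch of the read-side protocol described in the zoned comment
 * above: bracket reads of the zone bookkeeping fields with
 * blk_queue_enter()/blk_queue_exit() (declared later in this header) so a
 * concurrent revalidation cannot change them underneath us. The function
 * name is illustrative, not part of the kernel API.
 */
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int example_read_nr_zones(struct request_queue *q)
{
	unsigned int nr_zones = 0;

	if (blk_queue_enter(q, 0))
		return 0;	/* queue is dying, nothing to read */
	nr_zones = q->nr_zones;
	blk_queue_exit(q);
	return nr_zones;
}
#endif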

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS	21	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 23	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT       29	/* device supports NOWAIT */

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

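/*
 * Hedged sketch: a driver typically advertises queue capabilities at init
 * time with the flag helpers above; the function name and the particular
 * flags chosen here are illustrative.
 */
static inline void example_init_queue_flags(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);	/* no rotational penalty */
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);	/* don't feed entropy pool */
}
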
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return false;
	if (!q->conv_zones_bitmap)
		return true;
	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}

static inline void blk_queue_max_open_zones(struct request_queue *q,
		unsigned int max_open_zones)
{
	q->max_open_zones = max_open_zones;
}

static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return q->max_open_zones;
}

static inline void blk_queue_max_active_zones(struct request_queue *q,
		unsigned int max_active_zones)
{
	q->max_active_zones = max_active_zones;
}

static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return q->max_active_zones;
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	return false;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	return 0;
}
static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return 0;
}
static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
blk_qc_t submit_bio_noacct(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern void blk_queue_split(struct bio **);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif
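/*
 * Worked example: a 4096-byte filesystem block spans
 * 4096 >> SECTOR_SHIFT = 8 sectors; PAGE_SECTORS below is the same
 * conversion applied to PAGE_SIZE.
 */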

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev),
				 bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev),
				     bio->bi_iter.bi_sector);
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset,
					       unsigned int chunk_sectors)
{
	if (!chunk_sectors) {
		if (q->limits.chunk_sectors)
			chunk_sectors = q->limits.chunk_sectors;
		else
			return q->limits.max_sectors;
	}

	if (likely(is_power_of_2(chunk_sectors)))
		chunk_sectors -= offset & (chunk_sectors - 1);
	else
		chunk_sectors -= sector_div(offset, chunk_sectors);

	return min(q->limits.max_sectors, chunk_sectors);
}
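
/*
 * Hedged worked example (illustrative limits): with
 * q->limits.max_sectors = 1024 and chunk_sectors = 256 (a power of two),
 * blk_max_size_offset(q, 100, 0) returns
 * min(1024, 256 - (100 & 255)) = 156 sectors, i.e. the request may not
 * cross the chunk boundary at sector 256.
 */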

/*
 * Access functions for manipulating queue properties
 */
extern void blk_cleanup_queue(struct request_queue *);
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
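
/*
 * Hedged sketch: the common probe-time pattern for configuring queue limits
 * with the setters above. The function name and the values are illustrative
 * assumptions, not requirements.
 */
static inline void example_setup_queue_limits(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_io_min(q, 4096);
	blk_queue_max_hw_sectors(q, 2048);	/* 1 MiB in 512-byte sectors */
	blk_queue_max_segments(q, 128);
}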

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)
/* Supports scheduling on multiple hardware queues */
#define ELEVATOR_F_MQ_AWARE		(1U << 1)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called. A minimal usage
 * sketch follows the structure below.
 */
struct blk_plug {
	struct list_head mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool nowait;

	struct list_head cb_list; /* md requires an unplug callback */
};
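
/*
 * Hedged usage sketch of the plugging pattern described above: batch several
 * bios under one plug (helpers declared just below) so they can be merged
 * before reaching the device. The function name and the bio array are
 * illustrative assumptions; submit_bio() is the usual submission entry point.
 */
static inline void example_submit_bio_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* held on the per-task plug list */
	blk_finish_plug(&plug);		/* flush the whole batch */
}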

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		 (!list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(const struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
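
/*
 * Hedged worked example (illustrative numbers): with
 * physical_block_size = 4096 (so granularity is 8 sectors),
 * alignment_offset = 0 and sector = 12, sector_div() leaves a remainder of
 * 4 sectors = 2048 bytes, so the function returns
 * (4096 + 0 - 2048) % 4096 = 2048 bytes of misalignment.
 */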
1000c72758f3SMartin K. Petersen 
1001ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev)
1002ac481c20SMartin K. Petersen {
1003ac481c20SMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
1004ac481c20SMartin K. Petersen 
1005ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1006ac481c20SMartin K. Petersen 		return -1;
1007fa01b1e9SChristoph Hellwig 	if (bdev_is_partition(bdev))
10087b8917f5SChristoph Hellwig 		return queue_limit_alignment_offset(&q->limits,
100929ff57c6SChristoph Hellwig 				bdev->bd_start_sect);
1010ac481c20SMartin K. Petersen 	return q->limits.alignment_offset;
1011ac481c20SMartin K. Petersen }
1012ac481c20SMartin K. Petersen 
1013af2c68feSBart Van Assche static inline int queue_discard_alignment(const struct request_queue *q)
101486b37281SMartin K. Petersen {
101586b37281SMartin K. Petersen 	if (q->limits.discard_misaligned)
101686b37281SMartin K. Petersen 		return -1;
101786b37281SMartin K. Petersen 
101886b37281SMartin K. Petersen 	return q->limits.discard_alignment;
101986b37281SMartin K. Petersen }
102086b37281SMartin K. Petersen 
1021e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
102286b37281SMartin K. Petersen {
102359771079SLinus Torvalds 	unsigned int alignment, granularity, offset;
1024dd3d145dSMartin K. Petersen 
1025a934a00aSMartin K. Petersen 	if (!lim->max_discard_sectors)
1026a934a00aSMartin K. Petersen 		return 0;
1027a934a00aSMartin K. Petersen 
102859771079SLinus Torvalds 	/* Why are these in bytes, not sectors? */
1029233bde21SBart Van Assche 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
1030233bde21SBart Van Assche 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
103159771079SLinus Torvalds 	if (!granularity)
103259771079SLinus Torvalds 		return 0;
103359771079SLinus Torvalds 
103459771079SLinus Torvalds 	/* Offset of the partition start in 'granularity' sectors */
103559771079SLinus Torvalds 	offset = sector_div(sector, granularity);
103659771079SLinus Torvalds 
103759771079SLinus Torvalds 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
103859771079SLinus Torvalds 	offset = (granularity + alignment - offset) % granularity;
103959771079SLinus Torvalds 
104059771079SLinus Torvalds 	/* Turn it back into bytes, gaah */
1041233bde21SBart Van Assche 	return offset << SECTOR_SHIFT;
104286b37281SMartin K. Petersen }
104386b37281SMartin K. Petersen 
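/*
 * Worked example (illustrative numbers): with discard_granularity = 1 MiB
 * (2048 sectors), discard_alignment = 0 and a partition starting at
 * sector 2056, sector_div() leaves offset = 8, then
 * (2048 + 0 - 8) % 2048 = 2040 sectors, returned as 2040 << SECTOR_SHIFT
 * = 1044480 bytes: the distance from the partition start to the first
 * discard-aligned boundary.
 */
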
1044c6e66634SPaolo Bonzini static inline int bdev_discard_alignment(struct block_device *bdev)
1045c6e66634SPaolo Bonzini {
1046c6e66634SPaolo Bonzini 	struct request_queue *q = bdev_get_queue(bdev);
1047c6e66634SPaolo Bonzini 
1048fa01b1e9SChristoph Hellwig 	if (bdev_is_partition(bdev))
10497cf34d97SChristoph Hellwig 		return queue_limit_discard_alignment(&q->limits,
105029ff57c6SChristoph Hellwig 				bdev->bd_start_sect);
1051c6e66634SPaolo Bonzini 	return q->limits.discard_alignment;
1052c6e66634SPaolo Bonzini }
1053c6e66634SPaolo Bonzini 
10544363ac7cSMartin K. Petersen static inline unsigned int bdev_write_same(struct block_device *bdev)
10554363ac7cSMartin K. Petersen {
10564363ac7cSMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
10574363ac7cSMartin K. Petersen 
10584363ac7cSMartin K. Petersen 	if (q)
10594363ac7cSMartin K. Petersen 		return q->limits.max_write_same_sectors;
10604363ac7cSMartin K. Petersen 
10614363ac7cSMartin K. Petersen 	return 0;
10624363ac7cSMartin K. Petersen }
10634363ac7cSMartin K. Petersen 
1064a6f0788eSChaitanya Kulkarni static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1065a6f0788eSChaitanya Kulkarni {
1066a6f0788eSChaitanya Kulkarni 	struct request_queue *q = bdev_get_queue(bdev);
1067a6f0788eSChaitanya Kulkarni 
1068a6f0788eSChaitanya Kulkarni 	if (q)
1069a6f0788eSChaitanya Kulkarni 		return q->limits.max_write_zeroes_sectors;
1070a6f0788eSChaitanya Kulkarni 
1071a6f0788eSChaitanya Kulkarni 	return 0;
1072a6f0788eSChaitanya Kulkarni }
1073a6f0788eSChaitanya Kulkarni 
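/*
 * Example (illustrative): a nonzero return value doubles as a capability
 * check, e.g.
 *
 *	if (!bdev_write_zeroes_sectors(bdev))
 *		return -EOPNOTSUPP;	/* fall back to writing zero pages */
 *
 * so callers avoid building REQ_OP_WRITE_ZEROES bios for devices that
 * cannot service them.
 */
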
1074797476b8SDamien Le Moal static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1075797476b8SDamien Le Moal {
1076797476b8SDamien Le Moal 	struct request_queue *q = bdev_get_queue(bdev);
1077797476b8SDamien Le Moal 
1078797476b8SDamien Le Moal 	if (q)
1079797476b8SDamien Le Moal 		return blk_queue_zoned_model(q);
1080797476b8SDamien Le Moal 
1081797476b8SDamien Le Moal 	return BLK_ZONED_NONE;
1082797476b8SDamien Le Moal }
1083797476b8SDamien Le Moal 
1084797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev)
1085797476b8SDamien Le Moal {
1086797476b8SDamien Le Moal 	struct request_queue *q = bdev_get_queue(bdev);
1087797476b8SDamien Le Moal 
1088797476b8SDamien Le Moal 	if (q)
1089797476b8SDamien Le Moal 		return blk_queue_is_zoned(q);
1090797476b8SDamien Le Moal 
1091797476b8SDamien Le Moal 	return false;
1092797476b8SDamien Le Moal }
1093797476b8SDamien Le Moal 
1094113ab72eSDamien Le Moal static inline sector_t bdev_zone_sectors(struct block_device *bdev)
10956a0cb1bcSHannes Reinecke {
10966a0cb1bcSHannes Reinecke 	struct request_queue *q = bdev_get_queue(bdev);
10976a0cb1bcSHannes Reinecke 
10986a0cb1bcSHannes Reinecke 	if (q)
1099f99e8648SDamien Le Moal 		return blk_queue_zone_sectors(q);
11006cc77e9cSChristoph Hellwig 	return 0;
11016cc77e9cSChristoph Hellwig }
11026a0cb1bcSHannes Reinecke 
1103e15864f8SNiklas Cassel static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
1104e15864f8SNiklas Cassel {
1105e15864f8SNiklas Cassel 	struct request_queue *q = bdev_get_queue(bdev);
1106e15864f8SNiklas Cassel 
1107e15864f8SNiklas Cassel 	if (q)
1108e15864f8SNiklas Cassel 		return queue_max_open_zones(q);
1109e15864f8SNiklas Cassel 	return 0;
1110e15864f8SNiklas Cassel }
1111e15864f8SNiklas Cassel 
1112659bf827SNiklas Cassel static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
1113659bf827SNiklas Cassel {
1114659bf827SNiklas Cassel 	struct request_queue *q = bdev_get_queue(bdev);
1115659bf827SNiklas Cassel 
1116659bf827SNiklas Cassel 	if (q)
1117659bf827SNiklas Cassel 		return queue_max_active_zones(q);
1118659bf827SNiklas Cassel 	return 0;
1119659bf827SNiklas Cassel }
1120659bf827SNiklas Cassel 
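/*
 * Example (illustrative sketch): probing the zone geometry helpers above
 * before issuing zone-aware I/O.  example_print_zone_geometry() is a
 * hypothetical name and the message format is arbitrary.
 */
static inline void example_print_zone_geometry(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return;	/* conventional device, no zone constraints */

	pr_info("zones of %llu sectors, max %u open / %u active (0 = no limit)\n",
		(unsigned long long)bdev_zone_sectors(bdev),
		bdev_max_open_zones(bdev),
		bdev_max_active_zones(bdev));
}
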
1121af2c68feSBart Van Assche static inline int queue_dma_alignment(const struct request_queue *q)
11221da177e4SLinus Torvalds {
1123482eb689SPete Wyckoff 	return q ? q->dma_alignment : 511;
11241da177e4SLinus Torvalds }
11251da177e4SLinus Torvalds 
112614417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
112787904074SFUJITA Tomonori 				 unsigned int len)
112887904074SFUJITA Tomonori {
112987904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
113014417799SNamhyung Kim 	return !(addr & alignment) && !(len & alignment);
113187904074SFUJITA Tomonori }
113287904074SFUJITA Tomonori 
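/*
 * Example (illustrative): with the default 511 mask and no dma_pad_mask,
 * blk_rq_aligned() accepts only buffers whose address and length are both
 * multiples of 512: addr = 0x1000, len = 4096 passes, while addr = 0x1004
 * fails because 0x1004 & 511 is nonzero.
 */
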
11331da177e4SLinus Torvalds /* assumes size > 256 */
11341da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
11351da177e4SLinus Torvalds {
11361da177e4SLinus Torvalds 	unsigned int bits = 8;
11371da177e4SLinus Torvalds 	do {
11381da177e4SLinus Torvalds 		bits++;
11391da177e4SLinus Torvalds 		size >>= 1;
11401da177e4SLinus Torvalds 	} while (size > 256);
11411da177e4SLinus Torvalds 	return bits;
11421da177e4SLinus Torvalds }
11431da177e4SLinus Torvalds 
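/*
 * Worked example: blksize_bits(512) == 9, blksize_bits(1024) == 10,
 * blksize_bits(4096) == 12, i.e. log2() of a power-of-two block size.
 * For size <= 256 the do/while still runs once and over-reports, hence
 * the "assumes size > 256" above.
 */
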
11442befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev)
11451da177e4SLinus Torvalds {
11466b7b181bSChristoph Hellwig 	return 1 << bdev->bd_inode->i_blkbits;
11471da177e4SLinus Torvalds }
11481da177e4SLinus Torvalds 
114959c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work);
1150818cd1cbSJens Axboe int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
11511da177e4SLinus Torvalds 
11521da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
11531da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
11541da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
11551da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
11561da177e4SLinus Torvalds 
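/*
 * Example (illustrative): a driver owning a whole major would typically
 * declare
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(240);
 *
 * so module autoloading resolves accesses to block-major-240-*.  Major
 * 240 is from the local/experimental range and purely an example.
 */
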
1157d145dc23SSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1158d145dc23SSatya Tangirala 
1159d145dc23SSatya Tangirala bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1160d145dc23SSatya Tangirala 
1161d145dc23SSatya Tangirala void blk_ksm_unregister(struct request_queue *q);
1162d145dc23SSatya Tangirala 
1163d145dc23SSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1164d145dc23SSatya Tangirala 
1165d145dc23SSatya Tangirala static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
1166d145dc23SSatya Tangirala 				    struct request_queue *q)
1167d145dc23SSatya Tangirala {
1168d145dc23SSatya Tangirala 	return true;
1169d145dc23SSatya Tangirala }
1170d145dc23SSatya Tangirala 
1171d145dc23SSatya Tangirala static inline void blk_ksm_unregister(struct request_queue *q) { }
1172d145dc23SSatya Tangirala 
1173d145dc23SSatya Tangirala #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1174d145dc23SSatya Tangirala 
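/*
 * Example (illustrative): a driver with inline-encryption hardware would
 * register its keyslot manager while setting up the queue, e.g.
 *
 *	if (!blk_ksm_register(&dev->ksm, q))
 *		... continue with inline encryption disabled ...
 *
 * where dev->ksm is a driver-private field.  With
 * CONFIG_BLK_INLINE_ENCRYPTION=n the stub above always returns true, so
 * callers need no #ifdef of their own.
 */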
1175d145dc23SSatya Tangirala 
117608f85851SAl Viro struct block_device_operations {
1177c62b37d9SChristoph Hellwig 	blk_qc_t (*submit_bio) (struct bio *bio);
1178d4430d62SAl Viro 	int (*open) (struct block_device *, fmode_t);
1179db2a144bSAl Viro 	void (*release) (struct gendisk *, fmode_t);
11803f289dcbSTejun Heo 	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
1181d4430d62SAl Viro 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1182d4430d62SAl Viro 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
118377ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
118477ea887eSTejun Heo 				      unsigned int clearing);
1185c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
118608f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1187e00adcadSChristoph Hellwig 	int (*set_read_only)(struct block_device *bdev, bool ro);
1188b3a27d05SNitin Gupta 	/* this callback is with swap_lock and sometimes page table lock held */
1189b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1190e76239a3SChristoph Hellwig 	int (*report_zones)(struct gendisk *, sector_t sector,
1191d4100351SChristoph Hellwig 			unsigned int nr_zones, report_zones_cb cb, void *data);
1192348e114bSChristoph Hellwig 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
119308f85851SAl Viro 	struct module *owner;
1194bbd3e064SChristoph Hellwig 	const struct pr_ops *pr_ops;
11950bdfbca8SDmitry Osipenko 
11960bdfbca8SDmitry Osipenko 	/*
11970bdfbca8SDmitry Osipenko 	 * Special callback for probing GPT entry at a given sector.
11980bdfbca8SDmitry Osipenko 	 * Needed by Android devices, used by GPT scanner and MMC blk
11990bdfbca8SDmitry Osipenko 	 * driver.
12000bdfbca8SDmitry Osipenko 	 */
12010bdfbca8SDmitry Osipenko 	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
120208f85851SAl Viro };
120308f85851SAl Viro 
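/*
 * Example (illustrative sketch): a minimal bio-based driver usually fills
 * in only submit_bio and owner; the remaining callbacks are optional.
 * The example_* names are hypothetical, not kernel symbols.
 */
static blk_qc_t example_submit_bio(struct bio *bio);

static const struct block_device_operations example_bdev_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= example_submit_bio,
};
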
1204ee6a129dSArnd Bergmann #ifdef CONFIG_COMPAT
1205ee6a129dSArnd Bergmann extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
1206ee6a129dSArnd Bergmann 				      unsigned int, unsigned long);
1207ee6a129dSArnd Bergmann #else
1208ee6a129dSArnd Bergmann #define blkdev_compat_ptr_ioctl NULL
1209ee6a129dSArnd Bergmann #endif
1210ee6a129dSArnd Bergmann 
121147a191fdSMatthew Wilcox extern int bdev_read_page(struct block_device *, sector_t, struct page *);
121247a191fdSMatthew Wilcox extern int bdev_write_page(struct block_device *, sector_t, struct page *,
121347a191fdSMatthew Wilcox 						struct writeback_control *);
12146cc77e9cSChristoph Hellwig 
12150619317fSJens Axboe static inline void blk_wake_io_task(struct task_struct *waiter)
12160619317fSJens Axboe {
12170619317fSJens Axboe 	/*
12180619317fSJens Axboe 	 * If we're polling, the task itself is doing the completions. For
12190619317fSJens Axboe 	 * that case, we don't need to signal a wakeup, it's enough to just
12200619317fSJens Axboe 	 * mark us as RUNNING.
12210619317fSJens Axboe 	 */
12220619317fSJens Axboe 	if (waiter == current)
12230619317fSJens Axboe 		__set_current_state(TASK_RUNNING);
12240619317fSJens Axboe 	else
12250619317fSJens Axboe 		wake_up_process(waiter);
12260619317fSJens Axboe }
12270619317fSJens Axboe 
/* for drivers that account I/O directly against the gendisk */
1228956d510eSChristoph Hellwig unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1229956d510eSChristoph Hellwig 		unsigned int op);
1230956d510eSChristoph Hellwig void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1231956d510eSChristoph Hellwig 		unsigned long start_time);
1232956d510eSChristoph Hellwig 
/* for bio based drivers: returns the start time to pass to bio_end_io_acct() */
123399dfc43eSChristoph Hellwig unsigned long bio_start_io_acct(struct bio *bio);
123499dfc43eSChristoph Hellwig void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
123599dfc43eSChristoph Hellwig 		struct block_device *orig_bdev);
1236956d510eSChristoph Hellwig 
1237956d510eSChristoph Hellwig /**
1238956d510eSChristoph Hellwig  * bio_end_io_acct - end I/O accounting for bio based drivers
1239956d510eSChristoph Hellwig  * @bio:	bio to end account for
1240956d510eSChristoph Hellwig  * @start:	start time returned by bio_start_io_acct()
1241956d510eSChristoph Hellwig  */
1242956d510eSChristoph Hellwig static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1243956d510eSChristoph Hellwig {
124499dfc43eSChristoph Hellwig 	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
1245956d510eSChristoph Hellwig }
1246956d510eSChristoph Hellwig 
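/*
 * Example (illustrative): the usual accounting pattern in a bio based
 * driver's submit path:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	... perform or queue the I/O ...
 *	bio_end_io_acct(bio, start);
 *
 * A driver that remaps the bio to another device records the original
 * bdev and calls bio_end_io_acct_remapped() instead.
 */
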
12473f1266f1SChristoph Hellwig int bdev_read_only(struct block_device *bdev);
12483f1266f1SChristoph Hellwig int set_blocksize(struct block_device *bdev, int size);
12493f1266f1SChristoph Hellwig 
12503f1266f1SChristoph Hellwig const char *bdevname(struct block_device *bdev, char *buffer);
12514e7b5671SChristoph Hellwig int lookup_bdev(const char *pathname, dev_t *dev);
12523f1266f1SChristoph Hellwig 
12533f1266f1SChristoph Hellwig void blkdev_show(struct seq_file *seqf, off_t offset);
12543f1266f1SChristoph Hellwig 
12553f1266f1SChristoph Hellwig #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
12563f1266f1SChristoph Hellwig #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
12573f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
12583f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	512
12593f1266f1SChristoph Hellwig #else
12603f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	0
12611da177e4SLinus Torvalds #endif
12623f1266f1SChristoph Hellwig 
12633f1266f1SChristoph Hellwig struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
12643f1266f1SChristoph Hellwig 		void *holder);
12653f1266f1SChristoph Hellwig struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
126637c3fc9aSChristoph Hellwig int bd_prepare_to_claim(struct block_device *bdev, void *holder);
126737c3fc9aSChristoph Hellwig void bd_abort_claiming(struct block_device *bdev, void *holder);
12683f1266f1SChristoph Hellwig void blkdev_put(struct block_device *bdev, fmode_t mode);
12693f1266f1SChristoph Hellwig 
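/*
 * Example (illustrative): open by path with an exclusive claim, released
 * by a blkdev_put() with the same mode.  The path is arbitrary and the
 * holder cookie may be any unique pointer, here the hypothetical
 * example_bdev_ops above.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vda", FMODE_READ | FMODE_EXCL,
 *				  &example_bdev_ops);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 */
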
127022ae8ce8SChristoph Hellwig /* just for blk-cgroup, don't use elsewhere */
127122ae8ce8SChristoph Hellwig struct block_device *blkdev_get_no_open(dev_t dev);
127222ae8ce8SChristoph Hellwig void blkdev_put_no_open(struct block_device *bdev);
127322ae8ce8SChristoph Hellwig 
127422ae8ce8SChristoph Hellwig struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
127522ae8ce8SChristoph Hellwig void bdev_add(struct block_device *bdev, dev_t dev);
1276621c1f42SChristoph Hellwig struct block_device *I_BDEV(struct inode *inode);
12772c2b9fd6SChristoph Hellwig int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
12782c2b9fd6SChristoph Hellwig 		loff_t lend);
12793f1266f1SChristoph Hellwig 
12803f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
12813f1266f1SChristoph Hellwig void invalidate_bdev(struct block_device *bdev);
12823f1266f1SChristoph Hellwig int sync_blockdev(struct block_device *bdev);
12833f1266f1SChristoph Hellwig #else
12843f1266f1SChristoph Hellwig static inline void invalidate_bdev(struct block_device *bdev)
12853f1266f1SChristoph Hellwig {
12863f1266f1SChristoph Hellwig }
12873f1266f1SChristoph Hellwig static inline int sync_blockdev(struct block_device *bdev)
12883f1266f1SChristoph Hellwig {
12893f1266f1SChristoph Hellwig 	return 0;
12903f1266f1SChristoph Hellwig }
12913f1266f1SChristoph Hellwig #endif
12923f1266f1SChristoph Hellwig int fsync_bdev(struct block_device *bdev);
12933f1266f1SChristoph Hellwig 
1294040f04bdSChristoph Hellwig int freeze_bdev(struct block_device *bdev);
1295040f04bdSChristoph Hellwig int thaw_bdev(struct block_device *bdev);
12963f1266f1SChristoph Hellwig 
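/*
 * Example (illustrative): freeze_bdev()/thaw_bdev() bracket operations
 * that need the filesystem quiesced, such as taking a snapshot:
 *
 *	error = freeze_bdev(bdev);
 *	if (error)
 *		return error;
 *	... capture the snapshot ...
 *	error = thaw_bdev(bdev);
 *
 * The calls nest via a counter, so only the outermost thaw_bdev()
 * actually unfreezes the filesystem.
 */
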
12973f1266f1SChristoph Hellwig #endif /* _LINUX_BLKDEV_H */
1298