xref: /linux-6.15/include/linux/blkdev.h (revision 9abcfbd2)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is not
 * inserted.  Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

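/*
 * Example (illustrative sketch only, not part of this header's API): a
 * driver for removable media that does not want partition scanning would
 * typically set these flags before calling add_disk():
 *
 *	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
 */
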
enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char				flags;
	enum blk_integrity_checksum		csum_type;
	unsigned char				tuple_size;
	unsigned char				pi_offset;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

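/*
 * Example (sketch): blk_mode_t values are combined with bitwise OR.  An
 * exclusive read-write open, such as the one taken by a filesystem mount,
 * would pass:
 *
 *	BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL
 *
 * together with a @holder cookie to one of the bdev open helpers (for
 * instance bdev_file_open_by_path(), declared further down in this header).
 */
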
struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver; the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		zone_capacity;
	unsigned int		last_zone_capacity;
	unsigned long		*conv_zones_bitmap;
	unsigned int            zone_wplugs_hash_bits;
	spinlock_t              zone_wplugs_lock;
	struct mempool_s	*zone_wplugs_pool;
	struct hlist_head       *zone_wplugs_hash;
	struct list_head        zone_wplugs_err_list;
	struct work_struct	zone_wplugs_work;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk.  Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}
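
/*
 * Worked example: blk_validate_block_size(4096) returns 0 on a system with
 * 4K pages (512 <= 4096 <= PAGE_SIZE and 4096 is a power of two), while
 * blk_validate_block_size(520) returns -EINVAL because 520 is not a power
 * of two.
 */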

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
enum {
	/* supports a volatile write cache */
	BLK_FEAT_WRITE_CACHE			= (1u << 0),

	/* supports passing on the FUA bit */
	BLK_FEAT_FUA				= (1u << 1),

	/* rotational device (hard drive or floppy) */
	BLK_FEAT_ROTATIONAL			= (1u << 2),

	/* contributes to the random number pool */
	BLK_FEAT_ADD_RANDOM			= (1u << 3),

	/* do disk/partitions IO accounting */
	BLK_FEAT_IO_STAT			= (1u << 4),

	/* don't modify data until writeback is done */
	BLK_FEAT_STABLE_WRITES			= (1u << 5),

	/* always completes in submit context */
	BLK_FEAT_SYNCHRONOUS			= (1u << 6),

	/* supports REQ_NOWAIT */
	BLK_FEAT_NOWAIT				= (1u << 7),

	/* supports DAX */
	BLK_FEAT_DAX				= (1u << 8),

	/* supports I/O polling */
	BLK_FEAT_POLL				= (1u << 9),

	/* is a zoned device */
	BLK_FEAT_ZONED				= (1u << 10),

	/* supports Zone Reset All */
	BLK_FEAT_ZONE_RESETALL			= (1u << 11),

	/* supports PCI(e) p2p requests */
	BLK_FEAT_PCI_P2PDMA			= (1u << 12),

	/* skip this queue in blk_mq_(un)quiesce_tagset */
	BLK_FEAT_SKIP_TAGSET_QUIESCE		= (1u << 13),

	/* bounce all highmem pages */
	BLK_FEAT_BOUNCE_HIGH			= (1u << 14),

	/* undocumented magic for bcache */
	BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE	= (1u << 15),
};

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
enum {
	/* do not send FLUSH/FUA commands despite advertising a write cache */
	BLK_FLAG_WRITE_CACHE_DISABLED		= (1u << 0),

	/* I/O topology is misaligned */
	BLK_FEAT_MISALIGNED			= (1u << 1),
};

struct queue_limits {
	unsigned int		features;
	unsigned int		flags;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_user_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	/* atomic write limits */
	unsigned int		atomic_write_hw_max;
	unsigned int		atomic_write_max_sectors;
	unsigned int		atomic_write_hw_boundary;
	unsigned int		atomic_write_boundary_sectors;
	unsigned int		atomic_write_hw_unit_min;
	unsigned int		atomic_write_unit_min;
	unsigned int		atomic_write_hw_unit_max;
	unsigned int		atomic_write_unit_max;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned int		max_open_zones;
	unsigned int		max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;

	struct blk_integrity	integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);

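/*
 * Example (sketch): counting the zones of a zoned block device with the
 * report_zones_cb mechanism.  example_count_zone() is a hypothetical
 * callback, shown only to illustrate the contract: it is invoked once per
 * reported zone and returns 0 to continue (or a negative errno to abort).
 *
 *	static int example_count_zone(struct blk_zone *zone,
 *				      unsigned int idx, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      example_count_zone, &nr);
 */
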
/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	struct elevator_queue	*elevator;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	unsigned int		rq_timeout;

	unsigned int		queue_depth;

	refcount_t		refs;

	/* hw dispatch queues */
	unsigned int		nr_hw_queues;
	struct xarray		hctx_table;

	struct percpu_ref	q_usage_counter;

	struct request		*last_merge;

	spinlock_t		queue_lock;

	int			quiesce_depth;

	struct gendisk		*disk;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

	struct queue_limits	limits;

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	unsigned int		dma_pad_mask;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	int			node;

	spinlock_t		requeue_lock;
	struct list_head	requeue_list;
	struct delayed_work	requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;
	struct mutex		limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_SQ_SCHED     30	/* single queue style io dispatch */

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_zone_resetall(q)	\
	((q)->limits.features & BLK_FEAT_ZONE_RESETALL)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}

#ifdef CONFIG_BLK_DEV_ZONED
unsigned int bdev_nr_zones(struct block_device *bdev);

static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}

bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline int get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(lim, node_id, &__key);				\
})

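/*
 * Example (sketch, error handling abbreviated): typical setup path of a
 * BIO based driver using blk_alloc_disk().  The limits shown are purely
 * illustrative; passing a NULL @lim selects the default limits.
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *	};
 *	struct gendisk *disk;
 *
 *	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	...
 *	err = add_disk(disk);
 */
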
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

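/*
 * Example: passing 0 as the major number asks __register_blkdev() to
 * allocate an unused major dynamically; the allocated major is returned:
 *
 *	major = register_blkdev(0, "mydriver");
 *	if (major < 0)
 *		return major;
 */
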
bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}

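/*
 * Worked example: for boundary_sectors = 256 (a power of two) and
 * offset = 700, the fast path computes 256 - (700 & 255) = 256 - 188 = 68
 * sectors left before the next boundary.
 */
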
/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q:		queue to update
 *
 * This function starts an atomic update of the queue limits.  It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify.  The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.  The caller must have frozen the queue or ensured
 * that there is no outstanding I/O by other means.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q:		queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}

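/*
 * Example (hypothetical helper, not part of the kernel API): the usual
 * start/modify/commit pattern for an atomic limits update.  On an error
 * between start and commit, queue_limits_cancel_update() must be called
 * instead to drop the lock.
 */
static inline int example_queue_set_max_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_sectors = max_sectors;
	return queue_limits_commit_update(q, &lim);
}
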
9668689461bSChristoph Hellwig /*
96773e3715eSChristoph Hellwig  * These helpers are for drivers that have sloppy feature negotiation and might
96873e3715eSChristoph Hellwig  * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
96973e3715eSChristoph Hellwig  * completion handler when the device returned an indicator that the respective
97073e3715eSChristoph Hellwig  * feature is not actually supported.  They are racy and the driver needs to
97173e3715eSChristoph Hellwig  * cope with that.  Try to avoid this scheme if you can.
97273e3715eSChristoph Hellwig  */
97373e3715eSChristoph Hellwig static inline void blk_queue_disable_discard(struct request_queue *q)
97473e3715eSChristoph Hellwig {
97573e3715eSChristoph Hellwig 	q->limits.max_discard_sectors = 0;
97673e3715eSChristoph Hellwig }
97773e3715eSChristoph Hellwig 
97873e3715eSChristoph Hellwig static inline void blk_queue_disable_secure_erase(struct request_queue *q)
97973e3715eSChristoph Hellwig {
98073e3715eSChristoph Hellwig 	q->limits.max_secure_erase_sectors = 0;
98173e3715eSChristoph Hellwig }
98273e3715eSChristoph Hellwig 
98373e3715eSChristoph Hellwig static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
98473e3715eSChristoph Hellwig {
98573e3715eSChristoph Hellwig 	q->limits.max_write_zeroes_sectors = 0;
98673e3715eSChristoph Hellwig }
98773e3715eSChristoph Hellwig 
98873e3715eSChristoph Hellwig /*
9891da177e4SLinus Torvalds  * Access functions for manipulating queue properties
9901da177e4SLinus Torvalds  */
991471aa704SChristoph Hellwig void disk_update_readahead(struct gendisk *disk);
9927c958e32SMartin K. Petersen extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
9933c5820c7SMartin K. Petersen extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
994d278d4a8SJens Axboe extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
995b1bd055dSMartin K. Petersen extern void blk_set_stacking_limits(struct queue_limits *lim);
996c72758f3SMartin K. Petersen extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
997c72758f3SMartin K. Petersen 			    sector_t offset);
998c1373f1cSChristoph Hellwig void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
999c1373f1cSChristoph Hellwig 		sector_t offset, const char *pfx);
100027f8221aSFUJITA Tomonori extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1001242f9dcbSJens Axboe extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
10022e9bc346SChristoph Hellwig 
1003a2247f19SDamien Le Moal struct blk_independent_access_ranges *
1004a2247f19SDamien Le Moal disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
1005a2247f19SDamien Le Moal void disk_set_independent_access_ranges(struct gendisk *disk,
1006a2247f19SDamien Le Moal 				struct blk_independent_access_ranges *iars);
1007a2247f19SDamien Le Moal 
100809ac46c4STejun Heo bool __must_check blk_get_queue(struct request_queue *);
1009165125e1SJens Axboe extern void blk_put_queue(struct request_queue *);
10107a5428dcSChristoph Hellwig 
10117a5428dcSChristoph Hellwig void blk_mark_disk_dead(struct gendisk *disk);
10121da177e4SLinus Torvalds 
10131a4dcfa8SChristoph Hellwig #ifdef CONFIG_BLOCK
1014316cc67dSShaohua Li /*
101575df7136SSuresh Jayaraman  * blk_plug permits building a queue of related requests by holding the I/O
101675df7136SSuresh Jayaraman  * fragments for a short period. This allows merging of sequential requests
101775df7136SSuresh Jayaraman  * into a single larger request. As the requests are moved from a per-task
101875df7136SSuresh Jayaraman  * list to the device's request_queue in a batch, scalability improves
101975df7136SSuresh Jayaraman  * because contention on the request_queue lock is reduced.
102075df7136SSuresh Jayaraman  *
102175df7136SSuresh Jayaraman  * It is ok not to disable preemption when adding the request to the plug list
1022008f75a2SChristoph Hellwig  * or when attempting a merge. For details, please see schedule() where
1023008f75a2SChristoph Hellwig  * blk_flush_plug() is called.
1024316cc67dSShaohua Li  */
102573c10101SJens Axboe struct blk_plug {
1026bc490f81SJens Axboe 	struct request *mq_list; /* blk-mq requests */
102747c122e3SJens Axboe 
102847c122e3SJens Axboe 	/* if nr_ios is > 1, we can batch tag/rq allocations */
102947c122e3SJens Axboe 	struct request *cached_rq;
1030da4c8c3dSJens Axboe 	u64 cur_ktime;
103147c122e3SJens Axboe 	unsigned short nr_ios;
103247c122e3SJens Axboe 
10335f0ed774SJens Axboe 	unsigned short rq_count;
103447c122e3SJens Axboe 
1035ce5b009cSJens Axboe 	bool multiple_queues;
1036dc5fc361SJens Axboe 	bool has_elevator;
103747c122e3SJens Axboe 
103847c122e3SJens Axboe 	struct list_head cb_list; /* md requires an unplug callback */
103973c10101SJens Axboe };
104055c022bbSShaohua Li 
10419cbb1750SNeilBrown struct blk_plug_cb;
104274018dc3SNeilBrown typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1043048c9374SNeilBrown struct blk_plug_cb {
1044048c9374SNeilBrown 	struct list_head list;
10459cbb1750SNeilBrown 	blk_plug_cb_fn callback;
10469cbb1750SNeilBrown 	void *data;
1047048c9374SNeilBrown };
10489cbb1750SNeilBrown extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
10499cbb1750SNeilBrown 					     void *data, int size);
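/*
 * Illustrative sketch (md-style, hypothetical names): blk_check_plugged()
 * returns an existing or newly allocated callback for the current plug, or
 * NULL if the task is not plugged.  @size allows embedding the blk_plug_cb
 * in a larger private structure:
 *
 *	static void mydrv_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct mydrv_plug_cb *mcb =
 *			container_of(cb, struct mydrv_plug_cb, cb);
 *		...
 *	}
 *
 *	cb = blk_check_plugged(mydrv_unplug, mydev,
 *			       sizeof(struct mydrv_plug_cb));
 *	if (!cb)
 *		... no plug is active, submit directly ...
 */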
105073c10101SJens Axboe extern void blk_start_plug(struct blk_plug *);
105147c122e3SJens Axboe extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
105273c10101SJens Axboe extern void blk_finish_plug(struct blk_plug *);
105373c10101SJens Axboe 
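/*
 * Basic plugging pattern (illustrative sketch): bios submitted between
 * blk_start_plug() and blk_finish_plug() are held on a per-task list so
 * adjacent requests can be merged before the batch is issued:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);
 *	blk_finish_plug(&plug);
 */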
1054aa8dcccaSChristoph Hellwig void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
1055aa8dcccaSChristoph Hellwig static inline void blk_flush_plug(struct blk_plug *plug, bool async)
105673c10101SJens Axboe {
1057aa8dcccaSChristoph Hellwig 	if (plug)
1058aa8dcccaSChristoph Hellwig 		__blk_flush_plug(plug, async);
105973c10101SJens Axboe }
106073c10101SJens Axboe 
106106b23f92SJens Axboe /*
106206b23f92SJens Axboe  * tsk == current here
106306b23f92SJens Axboe  */
106406b23f92SJens Axboe static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
106506b23f92SJens Axboe {
106606b23f92SJens Axboe 	struct blk_plug *plug = tsk->plug;
106706b23f92SJens Axboe 
106806b23f92SJens Axboe 	if (plug)
106906b23f92SJens Axboe 		plug->cur_ktime = 0;
107006b23f92SJens Axboe 	current->flags &= ~PF_BLOCK_TS;
107106b23f92SJens Axboe }
107206b23f92SJens Axboe 
1073c6bf3f0eSChristoph Hellwig int blkdev_issue_flush(struct block_device *bdev);
10741a4dcfa8SChristoph Hellwig long nr_blockdev_pages(void);
10751a4dcfa8SChristoph Hellwig #else /* CONFIG_BLOCK */
10761a4dcfa8SChristoph Hellwig struct blk_plug {
10771a4dcfa8SChristoph Hellwig };
10781a4dcfa8SChristoph Hellwig 
107947c122e3SJens Axboe static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
108047c122e3SJens Axboe 					 unsigned short nr_ios)
108147c122e3SJens Axboe {
108247c122e3SJens Axboe }
108347c122e3SJens Axboe 
10841a4dcfa8SChristoph Hellwig static inline void blk_start_plug(struct blk_plug *plug)
10851a4dcfa8SChristoph Hellwig {
10861a4dcfa8SChristoph Hellwig }
10871a4dcfa8SChristoph Hellwig 
10881a4dcfa8SChristoph Hellwig static inline void blk_finish_plug(struct blk_plug *plug)
10891a4dcfa8SChristoph Hellwig {
10901a4dcfa8SChristoph Hellwig }
10911a4dcfa8SChristoph Hellwig 
1092008f75a2SChristoph Hellwig static inline void blk_flush_plug(struct blk_plug *plug, bool async)
10931a4dcfa8SChristoph Hellwig {
10941a4dcfa8SChristoph Hellwig }
10951a4dcfa8SChristoph Hellwig 
109606b23f92SJens Axboe static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
109706b23f92SJens Axboe {
109806b23f92SJens Axboe }
109906b23f92SJens Axboe 
1100c6bf3f0eSChristoph Hellwig static inline int blkdev_issue_flush(struct block_device *bdev)
11011a4dcfa8SChristoph Hellwig {
11021a4dcfa8SChristoph Hellwig 	return 0;
11031a4dcfa8SChristoph Hellwig }
11041a4dcfa8SChristoph Hellwig 
11051a4dcfa8SChristoph Hellwig static inline long nr_blockdev_pages(void)
11061a4dcfa8SChristoph Hellwig {
11071a4dcfa8SChristoph Hellwig 	return 0;
11081a4dcfa8SChristoph Hellwig }
11091a4dcfa8SChristoph Hellwig #endif /* CONFIG_BLOCK */
11101a4dcfa8SChristoph Hellwig 
111171ac860aSMing Lei extern void blk_io_schedule(void);
111271ac860aSMing Lei 
111344abff2cSChristoph Hellwig int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
111444abff2cSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask);
111544abff2cSChristoph Hellwig int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
111644abff2cSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
111744abff2cSChristoph Hellwig int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
111844abff2cSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp);
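/*
 * Illustrative sketch: discard a range only when the device supports it
 * (bdev, sector and nr_sects are assumptions of the example):
 *
 *	if (bdev_max_discard_sectors(bdev)) {
 *		int err = blkdev_issue_discard(bdev, sector, nr_sects,
 *					       GFP_KERNEL);
 *		...
 *	}
 */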
1119ee472d83SChristoph Hellwig 
1120ee472d83SChristoph Hellwig #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
1121cb365b96SChristoph Hellwig #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
1122ee472d83SChristoph Hellwig 
1123e73c23ffSChaitanya Kulkarni extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1124e73c23ffSChaitanya Kulkarni 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1125ee472d83SChristoph Hellwig 		unsigned flags);
11263f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1127ee472d83SChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
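/*
 * Illustrative sketch: zero a range, but fail with -EOPNOTSUPP instead of
 * falling back to explicitly writing zeroes when the device cannot unmap
 * or pre-zero the blocks:
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */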
1128ee472d83SChristoph Hellwig 
11292cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block,
11302cf6d26aSChristoph Hellwig 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1131fb2dce86SDavid Woodhouse {
1132233bde21SBart Van Assche 	return blkdev_issue_discard(sb->s_bdev,
1133233bde21SBart Van Assche 				    block << (sb->s_blocksize_bits -
1134233bde21SBart Van Assche 					      SECTOR_SHIFT),
1135233bde21SBart Van Assche 				    nr_blocks << (sb->s_blocksize_bits -
1136233bde21SBart Van Assche 						  SECTOR_SHIFT),
113744abff2cSChristoph Hellwig 				    gfp_mask);
1138fb2dce86SDavid Woodhouse }
1139e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1140a107e5a3STheodore Ts'o 		sector_t nr_blocks, gfp_t gfp_mask)
1141e6fa0be6SLukas Czerner {
1142e6fa0be6SLukas Czerner 	return blkdev_issue_zeroout(sb->s_bdev,
1143233bde21SBart Van Assche 				    block << (sb->s_blocksize_bits -
1144233bde21SBart Van Assche 					      SECTOR_SHIFT),
1145233bde21SBart Van Assche 				    nr_blocks << (sb->s_blocksize_bits -
1146233bde21SBart Van Assche 						  SECTOR_SHIFT),
1147ee472d83SChristoph Hellwig 				    gfp_mask, 0);
1148e6fa0be6SLukas Czerner }
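/*
 * The shifts above convert filesystem blocks to 512-byte sectors: with a
 * 4096-byte block size (s_blocksize_bits == 12), block N starts at sector
 * N << (12 - SECTOR_SHIFT), i.e. N * 8.
 */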
11491da177e4SLinus Torvalds 
1150fa01b1e9SChristoph Hellwig static inline bool bdev_is_partition(struct block_device *bdev)
1151fa01b1e9SChristoph Hellwig {
1152b8c873edSAl Viro 	return bdev_partno(bdev) != 0;
1153fa01b1e9SChristoph Hellwig }
1154fa01b1e9SChristoph Hellwig 
1155eb28d31bSMartin K. Petersen enum blk_default_limits {
1156eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENTS	= 128,
1157eb28d31bSMartin K. Petersen 	BLK_SAFE_MAX_SECTORS	= 255,
1158eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENT_SIZE	= 65536,
1159eb28d31bSMartin K. Petersen 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1160eb28d31bSMartin K. Petersen };
11610e435ac2SMilan Broz 
1162d6b9f4e6SChristoph Hellwig /*
1163d6b9f4e6SChristoph Hellwig  * Default upper limit for the software max_sectors limit used for
1164d6b9f4e6SChristoph Hellwig  * regular file system I/O.  This can be increased through sysfs.
1165d6b9f4e6SChristoph Hellwig  *
1166d6b9f4e6SChristoph Hellwig  * Not to be confused with the max_hw_sectors limit that is entirely
1167d6b9f4e6SChristoph Hellwig  * controlled by the driver, usually based on hardware limits.
1168d6b9f4e6SChristoph Hellwig  */
1169d6b9f4e6SChristoph Hellwig #define BLK_DEF_MAX_SECTORS_CAP	2560u
11700a26f327SKeith Busch 
1171af2c68feSBart Van Assche static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1172ae03bf63SMartin K. Petersen {
1173025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
1174ae03bf63SMartin K. Petersen }
1175ae03bf63SMartin K. Petersen 
1176af2c68feSBart Van Assche static inline unsigned long queue_virt_boundary(const struct request_queue *q)
117703100aadSKeith Busch {
117803100aadSKeith Busch 	return q->limits.virt_boundary_mask;
117903100aadSKeith Busch }
118003100aadSKeith Busch 
1181af2c68feSBart Van Assche static inline unsigned int queue_max_sectors(const struct request_queue *q)
1182ae03bf63SMartin K. Petersen {
1183025146e1SMartin K. Petersen 	return q->limits.max_sectors;
1184ae03bf63SMartin K. Petersen }
1185ae03bf63SMartin K. Petersen 
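/* queue_max_sectors() converted to bytes, clamped so the result fits in an int */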
1186547e2f70SChristoph Hellwig static inline unsigned int queue_max_bytes(struct request_queue *q)
1187547e2f70SChristoph Hellwig {
1188547e2f70SChristoph Hellwig 	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
1189547e2f70SChristoph Hellwig }
1190547e2f70SChristoph Hellwig 
1191af2c68feSBart Van Assche static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1192ae03bf63SMartin K. Petersen {
1193025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
1194ae03bf63SMartin K. Petersen }
1195ae03bf63SMartin K. Petersen 
1196af2c68feSBart Van Assche static inline unsigned short queue_max_segments(const struct request_queue *q)
1197ae03bf63SMartin K. Petersen {
11988a78362cSMartin K. Petersen 	return q->limits.max_segments;
1199ae03bf63SMartin K. Petersen }
1200ae03bf63SMartin K. Petersen 
1201af2c68feSBart Van Assche static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
12021e739730SChristoph Hellwig {
12031e739730SChristoph Hellwig 	return q->limits.max_discard_segments;
12041e739730SChristoph Hellwig }
12051e739730SChristoph Hellwig 
1206af2c68feSBart Van Assche static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1207ae03bf63SMartin K. Petersen {
1208025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
1209ae03bf63SMartin K. Petersen }
1210ae03bf63SMartin K. Petersen 
1211ccdbf0aaSDamien Le Moal static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
12120512a75bSKeith Busch {
1213ccdbf0aaSDamien Le Moal 	unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
1214fe6f0cdcSJohannes Thumshirn 
1215ccdbf0aaSDamien Le Moal 	return min_not_zero(l->max_zone_append_sectors, max_sectors);
1216ccdbf0aaSDamien Le Moal }
1217fe6f0cdcSJohannes Thumshirn 
1218ccdbf0aaSDamien Le Moal static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
1219ccdbf0aaSDamien Le Moal {
1220ccdbf0aaSDamien Le Moal 	if (!blk_queue_is_zoned(q))
1221ccdbf0aaSDamien Le Moal 		return 0;
1222ccdbf0aaSDamien Le Moal 
1223ccdbf0aaSDamien Le Moal 	return queue_limits_max_zone_append_sectors(&q->limits);
1224ccdbf0aaSDamien Le Moal }
1225ccdbf0aaSDamien Le Moal 
1226ccdbf0aaSDamien Le Moal static inline bool queue_emulates_zone_append(struct request_queue *q)
1227ccdbf0aaSDamien Le Moal {
1228ccdbf0aaSDamien Le Moal 	return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
1229ccdbf0aaSDamien Le Moal }
1230ccdbf0aaSDamien Le Moal 
1231ccdbf0aaSDamien Le Moal static inline bool bdev_emulates_zone_append(struct block_device *bdev)
1232ccdbf0aaSDamien Le Moal {
1233ccdbf0aaSDamien Le Moal 	return queue_emulates_zone_append(bdev_get_queue(bdev));
12340512a75bSKeith Busch }
12350512a75bSKeith Busch 
12362aba0d19SChristoph Hellwig static inline unsigned int
12372aba0d19SChristoph Hellwig bdev_max_zone_append_sectors(struct block_device *bdev)
12382aba0d19SChristoph Hellwig {
12392aba0d19SChristoph Hellwig 	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
12402aba0d19SChristoph Hellwig }
12412aba0d19SChristoph Hellwig 
124265ea1b66SNaohiro Aota static inline unsigned int bdev_max_segments(struct block_device *bdev)
124365ea1b66SNaohiro Aota {
124465ea1b66SNaohiro Aota 	return queue_max_segments(bdev_get_queue(bdev));
124565ea1b66SNaohiro Aota }
124665ea1b66SNaohiro Aota 
1247ad6bf88aSMikulas Patocka static inline unsigned queue_logical_block_size(const struct request_queue *q)
12481da177e4SLinus Torvalds {
12491da177e4SLinus Torvalds 	int retval = 512;
12501da177e4SLinus Torvalds 
1251025146e1SMartin K. Petersen 	if (q && q->limits.logical_block_size)
1252025146e1SMartin K. Petersen 		retval = q->limits.logical_block_size;
12531da177e4SLinus Torvalds 
12541da177e4SLinus Torvalds 	return retval;
12551da177e4SLinus Torvalds }
12561da177e4SLinus Torvalds 
1257ad6bf88aSMikulas Patocka static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
12581da177e4SLinus Torvalds {
1259e1defc4fSMartin K. Petersen 	return queue_logical_block_size(bdev_get_queue(bdev));
12601da177e4SLinus Torvalds }
12611da177e4SLinus Torvalds 
1262af2c68feSBart Van Assche static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1263c72758f3SMartin K. Petersen {
1264c72758f3SMartin K. Petersen 	return q->limits.physical_block_size;
1265c72758f3SMartin K. Petersen }
1266c72758f3SMartin K. Petersen 
1267892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1268ac481c20SMartin K. Petersen {
1269ac481c20SMartin K. Petersen 	return queue_physical_block_size(bdev_get_queue(bdev));
1270ac481c20SMartin K. Petersen }
1271ac481c20SMartin K. Petersen 
1272af2c68feSBart Van Assche static inline unsigned int queue_io_min(const struct request_queue *q)
1273c72758f3SMartin K. Petersen {
1274c72758f3SMartin K. Petersen 	return q->limits.io_min;
1275c72758f3SMartin K. Petersen }
1276c72758f3SMartin K. Petersen 
1277ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev)
1278ac481c20SMartin K. Petersen {
1279ac481c20SMartin K. Petersen 	return queue_io_min(bdev_get_queue(bdev));
1280ac481c20SMartin K. Petersen }
1281ac481c20SMartin K. Petersen 
1282af2c68feSBart Van Assche static inline unsigned int queue_io_opt(const struct request_queue *q)
1283c72758f3SMartin K. Petersen {
1284c72758f3SMartin K. Petersen 	return q->limits.io_opt;
1285c72758f3SMartin K. Petersen }
1286c72758f3SMartin K. Petersen 
1287ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev)
1288ac481c20SMartin K. Petersen {
1289ac481c20SMartin K. Petersen 	return queue_io_opt(bdev_get_queue(bdev));
1290ac481c20SMartin K. Petersen }
1291ac481c20SMartin K. Petersen 
1292a805a4faSDamien Le Moal static inline unsigned int
1293a805a4faSDamien Le Moal queue_zone_write_granularity(const struct request_queue *q)
1294a805a4faSDamien Le Moal {
1295a805a4faSDamien Le Moal 	return q->limits.zone_write_granularity;
1296a805a4faSDamien Le Moal }
1297a805a4faSDamien Le Moal 
1298a805a4faSDamien Le Moal static inline unsigned int
1299a805a4faSDamien Le Moal bdev_zone_write_granularity(struct block_device *bdev)
1300a805a4faSDamien Le Moal {
1301a805a4faSDamien Le Moal 	return queue_zone_write_granularity(bdev_get_queue(bdev));
1302a805a4faSDamien Le Moal }
1303a805a4faSDamien Le Moal 
130489098b07SChristoph Hellwig int bdev_alignment_offset(struct block_device *bdev);
13055c4b4a5cSChristoph Hellwig unsigned int bdev_discard_alignment(struct block_device *bdev);
1306c6e66634SPaolo Bonzini 
1307cf0fbf89SChristoph Hellwig static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
1308cf0fbf89SChristoph Hellwig {
1309cf0fbf89SChristoph Hellwig 	return bdev_get_queue(bdev)->limits.max_discard_sectors;
1310cf0fbf89SChristoph Hellwig }
1311cf0fbf89SChristoph Hellwig 
13127b47ef52SChristoph Hellwig static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
13137b47ef52SChristoph Hellwig {
13147b47ef52SChristoph Hellwig 	return bdev_get_queue(bdev)->limits.discard_granularity;
13157b47ef52SChristoph Hellwig }
13167b47ef52SChristoph Hellwig 
131744abff2cSChristoph Hellwig static inline unsigned int
131844abff2cSChristoph Hellwig bdev_max_secure_erase_sectors(struct block_device *bdev)
131944abff2cSChristoph Hellwig {
132044abff2cSChristoph Hellwig 	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
132144abff2cSChristoph Hellwig }
132244abff2cSChristoph Hellwig 
1323a6f0788eSChaitanya Kulkarni static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1324a6f0788eSChaitanya Kulkarni {
1325a6f0788eSChaitanya Kulkarni 	struct request_queue *q = bdev_get_queue(bdev);
1326a6f0788eSChaitanya Kulkarni 
1327a6f0788eSChaitanya Kulkarni 	if (q)
1328a6f0788eSChaitanya Kulkarni 		return q->limits.max_write_zeroes_sectors;
1329a6f0788eSChaitanya Kulkarni 
1330a6f0788eSChaitanya Kulkarni 	return 0;
1331a6f0788eSChaitanya Kulkarni }
1332a6f0788eSChaitanya Kulkarni 
133310f0d2a5SChristoph Hellwig static inline bool bdev_nonrot(struct block_device *bdev)
133410f0d2a5SChristoph Hellwig {
133510f0d2a5SChristoph Hellwig 	return blk_queue_nonrot(bdev_get_queue(bdev));
133610f0d2a5SChristoph Hellwig }
133710f0d2a5SChristoph Hellwig 
13383222d8c2SChristoph Hellwig static inline bool bdev_synchronous(struct block_device *bdev)
13393222d8c2SChristoph Hellwig {
1340aadd5c59SChristoph Hellwig 	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
13413222d8c2SChristoph Hellwig }
13423222d8c2SChristoph Hellwig 
134336d25489SChristoph Hellwig static inline bool bdev_stable_writes(struct block_device *bdev)
134436d25489SChristoph Hellwig {
13453c3e85ddSChristoph Hellwig 	struct request_queue *q = bdev_get_queue(bdev);
13463c3e85ddSChristoph Hellwig 
1347c6e56cf6SChristoph Hellwig 	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
1348c6e56cf6SChristoph Hellwig 	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
13493c3e85ddSChristoph Hellwig 		return true;
13501a02f3a7SChristoph Hellwig 	return q->limits.features & BLK_FEAT_STABLE_WRITES;
135136d25489SChristoph Hellwig }
135236d25489SChristoph Hellwig 
13531122c0c1SChristoph Hellwig static inline bool blk_queue_write_cache(struct request_queue *q)
13541122c0c1SChristoph Hellwig {
13551122c0c1SChristoph Hellwig 	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
1356bae1c743SChristoph Hellwig 		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
135708e688fdSChristoph Hellwig }
1358a557e82eSChristoph Hellwig 
1359a557e82eSChristoph Hellwig static inline bool bdev_write_cache(struct block_device *bdev)
1360a557e82eSChristoph Hellwig {
13611122c0c1SChristoph Hellwig 	return blk_queue_write_cache(bdev_get_queue(bdev));
1362797476b8SDamien Le Moal }
1363797476b8SDamien Le Moal 
1364a557e82eSChristoph Hellwig static inline bool bdev_fua(struct block_device *bdev)
1365a557e82eSChristoph Hellwig {
13661122c0c1SChristoph Hellwig 	return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
1367a557e82eSChristoph Hellwig }
1368a557e82eSChristoph Hellwig 
1369568ec936SChristoph Hellwig static inline bool bdev_nowait(struct block_device *bdev)
1370568ec936SChristoph Hellwig {
1371f76af42fSChristoph Hellwig 	return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
1372568ec936SChristoph Hellwig }
1373568ec936SChristoph Hellwig 
1374797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev)
1375797476b8SDamien Le Moal {
1376fea127b3SPankaj Raghav 	return blk_queue_is_zoned(bdev_get_queue(bdev));
1377797476b8SDamien Le Moal }
1378797476b8SDamien Le Moal 
1379d67ea690SPankaj Raghav static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
1380d67ea690SPankaj Raghav {
1381d67ea690SPankaj Raghav 	return disk_zone_no(bdev->bd_disk, sec);
1382d67ea690SPankaj Raghav }
1383d67ea690SPankaj Raghav 
1384113ab72eSDamien Le Moal static inline sector_t bdev_zone_sectors(struct block_device *bdev)
13856a0cb1bcSHannes Reinecke {
13866a0cb1bcSHannes Reinecke 	struct request_queue *q = bdev_get_queue(bdev);
13876a0cb1bcSHannes Reinecke 
1388de71973cSChristoph Hellwig 	if (!blk_queue_is_zoned(q))
13896cc77e9cSChristoph Hellwig 		return 0;
1390de71973cSChristoph Hellwig 	return q->limits.chunk_sectors;
13916cc77e9cSChristoph Hellwig }
13926a0cb1bcSHannes Reinecke 
1393e29b2100SPankaj Raghav static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
1394e29b2100SPankaj Raghav 						   sector_t sector)
1395e29b2100SPankaj Raghav {
1396e29b2100SPankaj Raghav 	return sector & (bdev_zone_sectors(bdev) - 1);
1397e29b2100SPankaj Raghav }
1398e29b2100SPankaj Raghav 
1399b85a3c1bSDamien Le Moal static inline sector_t bio_offset_from_zone_start(struct bio *bio)
1400b85a3c1bSDamien Le Moal {
1401b85a3c1bSDamien Le Moal 	return bdev_offset_from_zone_start(bio->bi_bdev,
1402b85a3c1bSDamien Le Moal 					   bio->bi_iter.bi_sector);
1403b85a3c1bSDamien Le Moal }
1404b85a3c1bSDamien Le Moal 
1405e29b2100SPankaj Raghav static inline bool bdev_is_zone_start(struct block_device *bdev,
1406e29b2100SPankaj Raghav 				      sector_t sector)
1407e29b2100SPankaj Raghav {
1408e29b2100SPankaj Raghav 	return bdev_offset_from_zone_start(bdev, sector) == 0;
1409e29b2100SPankaj Raghav }
1410e29b2100SPankaj Raghav 
1411af2c68feSBart Van Assche static inline int queue_dma_alignment(const struct request_queue *q)
14121da177e4SLinus Torvalds {
1413c964d62fSKeith Busch 	return q ? q->limits.dma_alignment : 511;
14141da177e4SLinus Torvalds }
14151da177e4SLinus Torvalds 
14169da3d1e9SJohn Garry static inline unsigned int
14179da3d1e9SJohn Garry queue_atomic_write_unit_max_bytes(const struct request_queue *q)
14189da3d1e9SJohn Garry {
14199da3d1e9SJohn Garry 	return q->limits.atomic_write_unit_max;
14209da3d1e9SJohn Garry }
14219da3d1e9SJohn Garry 
14229da3d1e9SJohn Garry static inline unsigned int
14239da3d1e9SJohn Garry queue_atomic_write_unit_min_bytes(const struct request_queue *q)
14249da3d1e9SJohn Garry {
14259da3d1e9SJohn Garry 	return q->limits.atomic_write_unit_min;
14269da3d1e9SJohn Garry }
14279da3d1e9SJohn Garry 
14289da3d1e9SJohn Garry static inline unsigned int
14299da3d1e9SJohn Garry queue_atomic_write_boundary_bytes(const struct request_queue *q)
14309da3d1e9SJohn Garry {
14319da3d1e9SJohn Garry 	return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
14329da3d1e9SJohn Garry }
14339da3d1e9SJohn Garry 
14349da3d1e9SJohn Garry static inline unsigned int
14359da3d1e9SJohn Garry queue_atomic_write_max_bytes(const struct request_queue *q)
14369da3d1e9SJohn Garry {
14379da3d1e9SJohn Garry 	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
14389da3d1e9SJohn Garry }
14399da3d1e9SJohn Garry 
14404a2dcc35SKeith Busch static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
14414a2dcc35SKeith Busch {
14424a2dcc35SKeith Busch 	return queue_dma_alignment(bdev_get_queue(bdev));
14434a2dcc35SKeith Busch }
14444a2dcc35SKeith Busch 
14455debd969SKeith Busch static inline bool bdev_iter_is_aligned(struct block_device *bdev,
14465debd969SKeith Busch 					struct iov_iter *iter)
14475debd969SKeith Busch {
14485debd969SKeith Busch 	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
14495debd969SKeith Busch 				   bdev_logical_block_size(bdev) - 1);
14505debd969SKeith Busch }
14515debd969SKeith Busch 
145214417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
145387904074SFUJITA Tomonori 				 unsigned int len)
145487904074SFUJITA Tomonori {
145587904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
145614417799SNamhyung Kim 	return !(addr & alignment) && !(len & alignment);
145787904074SFUJITA Tomonori }
145887904074SFUJITA Tomonori 
14591da177e4SLinus Torvalds /* assumes size > 256 */
14601da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
14611da177e4SLinus Torvalds {
1462adff2158SDawei Li 	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
14631da177e4SLinus Torvalds }
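/*
 * e.g. blksize_bits(512) == 9 and blksize_bits(4096) == 12.
 */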
14641da177e4SLinus Torvalds 
146559c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work);
1466818cd1cbSJens Axboe int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
14671da177e4SLinus Torvalds 
14681da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
14691da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
14701da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
14711da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
14721da177e4SLinus Torvalds 
1473d145dc23SSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1474d145dc23SSatya Tangirala 
1475cb77cb5aSEric Biggers bool blk_crypto_register(struct blk_crypto_profile *profile,
1476cb77cb5aSEric Biggers 			 struct request_queue *q);
1477d145dc23SSatya Tangirala 
1478d145dc23SSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1479d145dc23SSatya Tangirala 
1480cb77cb5aSEric Biggers static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
1481d145dc23SSatya Tangirala 				       struct request_queue *q)
1482d145dc23SSatya Tangirala {
1483d145dc23SSatya Tangirala 	return true;
1484d145dc23SSatya Tangirala }
1485d145dc23SSatya Tangirala 
1486d145dc23SSatya Tangirala #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1487d145dc23SSatya Tangirala 
14889208d414SChristoph Hellwig enum blk_unique_id {
14899208d414SChristoph Hellwig 	/* these match the Designator Types specified in SPC */
14909208d414SChristoph Hellwig 	BLK_UID_T10	= 1,
14919208d414SChristoph Hellwig 	BLK_UID_EUI64	= 2,
14929208d414SChristoph Hellwig 	BLK_UID_NAA	= 3,
14939208d414SChristoph Hellwig };
14949208d414SChristoph Hellwig 
149508f85851SAl Viro struct block_device_operations {
14963e08773cSChristoph Hellwig 	void (*submit_bio)(struct bio *bio);
149769fe0f29SMing Lei 	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
149869fe0f29SMing Lei 			unsigned int flags);
149905bdb996SChristoph Hellwig 	int (*open)(struct gendisk *disk, blk_mode_t mode);
1500ae220766SChristoph Hellwig 	void (*release)(struct gendisk *disk);
150105bdb996SChristoph Hellwig 	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
150205bdb996SChristoph Hellwig 			unsigned cmd, unsigned long arg);
150305bdb996SChristoph Hellwig 	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
150405bdb996SChristoph Hellwig 			unsigned cmd, unsigned long arg);
150577ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
150677ea887eSTejun Heo 				      unsigned int clearing);
1507c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
150808f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1509e00adcadSChristoph Hellwig 	int (*set_read_only)(struct block_device *bdev, bool ro);
151076792055SChristoph Hellwig 	void (*free_disk)(struct gendisk *disk);
1511b3a27d05SNitin Gupta 	/* called with swap_lock and sometimes the page table lock held */
1512b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1513e76239a3SChristoph Hellwig 	int (*report_zones)(struct gendisk *, sector_t sector,
1514d4100351SChristoph Hellwig 			unsigned int nr_zones, report_zones_cb cb, void *data);
1515050a4f34SJens Axboe 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
15169208d414SChristoph Hellwig 	/* returns the length of the identifier or a negative errno: */
15179208d414SChristoph Hellwig 	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
15189208d414SChristoph Hellwig 			enum blk_unique_id id_type);
151908f85851SAl Viro 	struct module *owner;
1520bbd3e064SChristoph Hellwig 	const struct pr_ops *pr_ops;
15210bdfbca8SDmitry Osipenko 
15220bdfbca8SDmitry Osipenko 	/*
15230bdfbca8SDmitry Osipenko 	 * Special callback for probing GPT entry at a given sector.
15240bdfbca8SDmitry Osipenko 	 * Needed by Android devices, used by GPT scanner and MMC blk
15250bdfbca8SDmitry Osipenko 	 * driver.
15260bdfbca8SDmitry Osipenko 	 */
15270bdfbca8SDmitry Osipenko 	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
152808f85851SAl Viro };
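/*
 * Illustrative sketch (hypothetical driver): most drivers implement only a
 * few of these methods and point gendisk->fops at a static instance before
 * calling add_disk():
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.ioctl		= mydrv_ioctl,
 *		.getgeo		= mydrv_getgeo,
 *	};
 */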
152908f85851SAl Viro 
1530ee6a129dSArnd Bergmann #ifdef CONFIG_COMPAT
153105bdb996SChristoph Hellwig extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
1532ee6a129dSArnd Bergmann 				      unsigned int, unsigned long);
1533ee6a129dSArnd Bergmann #else
1534ee6a129dSArnd Bergmann #define blkdev_compat_ptr_ioctl NULL
1535ee6a129dSArnd Bergmann #endif
1536ee6a129dSArnd Bergmann 
15370619317fSJens Axboe static inline void blk_wake_io_task(struct task_struct *waiter)
15380619317fSJens Axboe {
15390619317fSJens Axboe 	/*
15400619317fSJens Axboe 	 * If we're polling, the task itself is doing the completions. For
15410619317fSJens Axboe 	 * that case, we don't need to signal a wakeup, it's enough to just
15420619317fSJens Axboe 	 * mark us as RUNNING.
15430619317fSJens Axboe 	 */
15440619317fSJens Axboe 	if (waiter == current)
15450619317fSJens Axboe 		__set_current_state(TASK_RUNNING);
15460619317fSJens Axboe 	else
15470619317fSJens Axboe 		wake_up_process(waiter);
15480619317fSJens Axboe }
15490619317fSJens Axboe 
15505f275713SYu Kuai unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
15515f0614a5SMing Lei 				 unsigned long start_time);
155277e7ffd7SBart Van Assche void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
15535f275713SYu Kuai 		      unsigned int sectors, unsigned long start_time);
1554956d510eSChristoph Hellwig 
155599dfc43eSChristoph Hellwig unsigned long bio_start_io_acct(struct bio *bio);
155699dfc43eSChristoph Hellwig void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
155799dfc43eSChristoph Hellwig 		struct block_device *orig_bdev);
1558956d510eSChristoph Hellwig 
1559956d510eSChristoph Hellwig /**
1560956d510eSChristoph Hellwig  * bio_end_io_acct - end I/O accounting for bio based drivers
1561956d510eSChristoph Hellwig  * @bio:	bio to end account for
1562b42c1fc3SChristoph Hellwig  * @start_time:	start time returned by bio_start_io_acct()
1563956d510eSChristoph Hellwig  */
1564956d510eSChristoph Hellwig static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1565956d510eSChristoph Hellwig {
156699dfc43eSChristoph Hellwig 	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
1567956d510eSChristoph Hellwig }
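/*
 * Accounting pattern for bio based drivers (illustrative sketch):
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	... process the bio ...
 *
 *	bio_end_io_acct(bio, start);
 *
 * Use bio_end_io_acct_remapped() instead when the bio has been remapped to
 * a different block device after accounting started.
 */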
1568956d510eSChristoph Hellwig 
15693f1266f1SChristoph Hellwig int bdev_read_only(struct block_device *bdev);
1570ead083aeSAl Viro int set_blocksize(struct file *file, int size);
15713f1266f1SChristoph Hellwig 
15724e7b5671SChristoph Hellwig int lookup_bdev(const char *pathname, dev_t *dev);
15733f1266f1SChristoph Hellwig 
15743f1266f1SChristoph Hellwig void blkdev_show(struct seq_file *seqf, off_t offset);
15753f1266f1SChristoph Hellwig 
15763f1266f1SChristoph Hellwig #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
15773f1266f1SChristoph Hellwig #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
15783f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
15793f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	512
15803f1266f1SChristoph Hellwig #else
15813f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	0
15821da177e4SLinus Torvalds #endif
15833f1266f1SChristoph Hellwig 
15840718afd4SChristoph Hellwig struct blk_holder_ops {
1585d8530de5SChristoph Hellwig 	void (*mark_dead)(struct block_device *bdev, bool surprise);
15862142b88cSChristoph Hellwig 
15872142b88cSChristoph Hellwig 	/*
15882142b88cSChristoph Hellwig 	 * Sync the file system mounted on the block device.
15892142b88cSChristoph Hellwig 	 */
15902142b88cSChristoph Hellwig 	void (*sync)(struct block_device *bdev);
1591a30561a9SChristian Brauner 
1592a30561a9SChristian Brauner 	/*
1593a30561a9SChristian Brauner 	 * Freeze the file system mounted on the block device.
1594a30561a9SChristian Brauner 	 */
1595a30561a9SChristian Brauner 	int (*freeze)(struct block_device *bdev);
1596a30561a9SChristian Brauner 
1597a30561a9SChristian Brauner 	/*
1598a30561a9SChristian Brauner 	 * Thaw the file system mounted on the block device.
1599a30561a9SChristian Brauner 	 */
1600a30561a9SChristian Brauner 	int (*thaw)(struct block_device *bdev);
16010718afd4SChristoph Hellwig };
16020718afd4SChristoph Hellwig 
1603e419cf3eSChristian Brauner /*
1604e419cf3eSChristian Brauner  * For filesystems using @fs_holder_ops, the @holder argument passed to
1605e419cf3eSChristian Brauner  * helpers used to open and claim block devices via
1606e419cf3eSChristian Brauner  * bd_prepare_to_claim() must point to a superblock.
1607e419cf3eSChristian Brauner  */
16087ecd0b6fSChristoph Hellwig extern const struct blk_holder_ops fs_holder_ops;
16097ecd0b6fSChristoph Hellwig 
16103f0b3e78SChristoph Hellwig /*
16113f0b3e78SChristoph Hellwig  * Return the correct open flags for blkdev_get_by_* for super block flags
16123f0b3e78SChristoph Hellwig  * as stored in sb->s_flags.
16133f0b3e78SChristoph Hellwig  */
16143f0b3e78SChristoph Hellwig #define sb_open_mode(flags) \
16156f861765SJan Kara 	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
16166f861765SJan Kara 	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
16173f0b3e78SChristoph Hellwig 
1618f3a60882SChristian Brauner struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
1619f3a60882SChristian Brauner 		const struct blk_holder_ops *hops);
1620f3a60882SChristian Brauner struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
1621f3a60882SChristian Brauner 		void *holder, const struct blk_holder_ops *hops);
16220718afd4SChristoph Hellwig int bd_prepare_to_claim(struct block_device *bdev, void *holder,
16230718afd4SChristoph Hellwig 		const struct blk_holder_ops *hops);
162437c3fc9aSChristoph Hellwig void bd_abort_claiming(struct block_device *bdev, void *holder);
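/*
 * Illustrative sketch: open a block device by path and release it again
 * (path and holder are assumptions of the example; a NULL hops is allowed):
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path(path,
 *			BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	...
 *	bdev_fput(bdev_file);
 *
 * file_bdev() converts the returned file to the underlying block_device.
 */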
16253f1266f1SChristoph Hellwig 
162622ae8ce8SChristoph Hellwig /* just for blk-cgroup, don't use elsewhere */
162722ae8ce8SChristoph Hellwig struct block_device *blkdev_get_no_open(dev_t dev);
162822ae8ce8SChristoph Hellwig void blkdev_put_no_open(struct block_device *bdev);
162922ae8ce8SChristoph Hellwig 
1630621c1f42SChristoph Hellwig struct block_device *I_BDEV(struct inode *inode);
1631f3a60882SChristian Brauner struct block_device *file_bdev(struct file *bdev_file);
1632186ddac2SYu Kuai bool disk_live(struct gendisk *disk);
1633186ddac2SYu Kuai unsigned int block_size(struct block_device *bdev);
16343f1266f1SChristoph Hellwig 
16353f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
16363f1266f1SChristoph Hellwig void invalidate_bdev(struct block_device *bdev);
16373f1266f1SChristoph Hellwig int sync_blockdev(struct block_device *bdev);
163897d6fb1bSYuezhang Mo int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
163970164eb6SChristoph Hellwig int sync_blockdev_nowait(struct block_device *bdev);
16401e03a36bSChristoph Hellwig void sync_bdevs(bool wait);
1641*9abcfbd2SPrasad Singamsetty void bdev_statx(struct inode *backing_inode, struct kstat *stat,
1642*9abcfbd2SPrasad Singamsetty 		u32 request_mask);
1643322cbb50SChristoph Hellwig void printk_all_partitions(void);
16442577f53fSChristoph Hellwig int __init early_lookup_bdev(const char *pathname, dev_t *dev);
16453f1266f1SChristoph Hellwig #else
16463f1266f1SChristoph Hellwig static inline void invalidate_bdev(struct block_device *bdev)
16473f1266f1SChristoph Hellwig {
16483f1266f1SChristoph Hellwig }
16493f1266f1SChristoph Hellwig static inline int sync_blockdev(struct block_device *bdev)
16503f1266f1SChristoph Hellwig {
16513f1266f1SChristoph Hellwig 	return 0;
16523f1266f1SChristoph Hellwig }
165370164eb6SChristoph Hellwig static inline int sync_blockdev_nowait(struct block_device *bdev)
165470164eb6SChristoph Hellwig {
165570164eb6SChristoph Hellwig 	return 0;
165670164eb6SChristoph Hellwig }
16571e03a36bSChristoph Hellwig static inline void sync_bdevs(bool wait)
16581e03a36bSChristoph Hellwig {
16591e03a36bSChristoph Hellwig }
1660*9abcfbd2SPrasad Singamsetty static inline void bdev_statx(struct inode *backing_inode, struct kstat *stat,
1661*9abcfbd2SPrasad Singamsetty 				u32 request_mask)
16622d985f8cSEric Biggers {
16632d985f8cSEric Biggers }
1664322cbb50SChristoph Hellwig static inline void printk_all_partitions(void)
1665322cbb50SChristoph Hellwig {
1666322cbb50SChristoph Hellwig }
1667cf056a43SChristoph Hellwig static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
1668cf056a43SChristoph Hellwig {
1669cf056a43SChristoph Hellwig 	return -EINVAL;
1670cf056a43SChristoph Hellwig }
1671322cbb50SChristoph Hellwig #endif /* CONFIG_BLOCK */
1672322cbb50SChristoph Hellwig 
1673982c3b30SChristian Brauner int bdev_freeze(struct block_device *bdev);
1674982c3b30SChristian Brauner int bdev_thaw(struct block_device *bdev);
167522650a99SChristian Brauner void bdev_fput(struct file *bdev_file);
16763f1266f1SChristoph Hellwig 
16775a72e899SJens Axboe struct io_comp_batch {
16785a72e899SJens Axboe 	struct request *req_list;
16795a72e899SJens Axboe 	bool need_ts;
16805a72e899SJens Axboe 	void (*complete)(struct io_comp_batch *);
16815a72e899SJens Axboe };
16825a72e899SJens Axboe 
16839da3d1e9SJohn Garry static inline bool bdev_can_atomic_write(struct block_device *bdev)
16849da3d1e9SJohn Garry {
16859da3d1e9SJohn Garry 	struct request_queue *bd_queue = bdev->bd_queue;
16869da3d1e9SJohn Garry 	struct queue_limits *limits = &bd_queue->limits;
16879da3d1e9SJohn Garry 
16889da3d1e9SJohn Garry 	if (!limits->atomic_write_unit_min)
16899da3d1e9SJohn Garry 		return false;
16909da3d1e9SJohn Garry 
16919da3d1e9SJohn Garry 	if (bdev_is_partition(bdev)) {
16929da3d1e9SJohn Garry 		sector_t bd_start_sect = bdev->bd_start_sect;
16939da3d1e9SJohn Garry 		unsigned int alignment =
16949da3d1e9SJohn Garry 			max(limits->atomic_write_unit_min,
16959da3d1e9SJohn Garry 			    limits->atomic_write_hw_boundary);
16969da3d1e9SJohn Garry 
16979da3d1e9SJohn Garry 		if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
16989da3d1e9SJohn Garry 			return false;
16999da3d1e9SJohn Garry 	}
17009da3d1e9SJohn Garry 
17019da3d1e9SJohn Garry 	return true;
17029da3d1e9SJohn Garry }
17039da3d1e9SJohn Garry 
17045a72e899SJens Axboe #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
17055a72e899SJens Axboe 
17063f1266f1SChristoph Hellwig #endif /* _LINUX_BLKDEV_H */