/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS	16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC	-1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is
 * not inserted.  Shall not be set for devices which are removed entirely when
 * the media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*.  Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct kobject integrity_kobj;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of
	 * nr_zones bits which indicates if a zone is conventional (bit set)
	 * or sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns the number of openers for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk.  Note that this value is
 * only stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

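/*
 * Illustrative sketch only, not part of the API: since the count is only
 * stable under open_mutex, a driver that must refuse an operation while the
 * disk is held open elsewhere would sample it like this:
 *
 *	mutex_lock(&disk->open_mutex);
 *	busy = disk_openers(disk) > 1;	(one opener being the caller itself)
 *	mutex_unlock(&disk->open_mutex);
 */
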
/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
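
/*
 * Illustrative sketch only, not part of the API: blkdev_report_zones()
 * invokes the report_zones_cb once per zone and passes the caller's cookie
 * through the data argument.  copy_zone_cb and zones below are hypothetical
 * names; a non-zero return from the callback stops the iteration:
 *
 *	static int copy_zone_cb(struct blk_zone *zone, unsigned int idx,
 *				void *data)
 *	{
 *		struct blk_zone *zones = data;
 *
 *		memcpy(&zones[idx], zone, sizeof(*zone));
 *		return 0;
 *	}
 *
 *	ret = blkdev_report_zones(bdev, 0, nr_zones, copy_zone_cb, zones);
 *
 * On success ret is the number of zones reported, else a negative errno.
 */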

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not
 * overlap and must include all sectors within the disk capacity (no sector
 * holes allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;
	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	*poll_stat;

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	int			quiesce_depth;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;

	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the request queue
	 * is blocking (BLK_MQ_F_BLOCKING). Must be the last member.
	 */
	struct srcu_struct	srcu[];
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_HAS_SRCU	2	/* SRCU is allocated */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_has_srcu(q)	test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif
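
/*
 * Illustrative sketch only, not part of the API: dma_map_bvec() above is a
 * thin wrapper around dma_map_page_attrs(), so mapping one bio_vec of a
 * request for device-bound DMA looks like (bv is a hypothetical bio_vec):
 *
 *	dma_addr_t addr = dma_map_bvec(dev, &bv, DMA_TO_DEVICE, 0);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return BLK_STS_RESOURCE;
 */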

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}
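
/*
 * Illustrative sketch only, not part of the API: the usual registration and
 * error handling pattern for a disk allocated with blk_alloc_disk() below
 * (mydisk is a hypothetical name):
 *
 *	err = add_disk(mydisk);
 *	if (err)
 *		goto out_put_disk;	(put_disk() frees the allocation)
 *
 * Once add_disk() has succeeded, teardown must call del_gendisk() before
 * the final put_disk().
 */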

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
static inline int bd_register_pending_holders(struct gendisk *disk)
{
	return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
/* do not sleep to wait for the expected completion time */
#define BLK_POLL_NOSLEEP		(1 << 1)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

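/*
 * Illustrative sketch only, not part of the API: a zoned driver typically
 * keys per-zone state off these helpers, e.g. indexing a hypothetical
 * zone_state array and special-casing sequential-write-required zones:
 *
 *	struct my_zone *z = &zone_state[bio_zone_no(bio)];
 *
 *	if (bio_op(bio) == REQ_OP_WRITE && bio_zone_is_seq(bio))
 *		the write must land at z's zone write pointer
 */
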
/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}
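
/*
 * Worked example (illustrative only): with chunk_sectors = 256 (a power of
 * two) and offset = 300, the fast path computes 300 & 255 = 44, so
 * blk_chunk_sectors_left() returns 256 - 44 = 212 sectors, i.e. the I/O may
 * extend up to sector 512 where the next chunk begins.
 */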

/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;
	bool nowait;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);
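
/*
 * Illustrative sketch only, not part of the API: the canonical plugging
 * pattern for a submitter that issues a batch of bios (the loop condition
 * and next_bio() are hypothetical):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (have_more_io())
 *		submit_bio(next_bio());
 *	blk_finish_plug(&plug);
 *
 * Requests queued between the two calls may be merged and are handed to the
 * driver in one batch when the plug is finished or the task schedules.
 */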
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}
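/*
 * Usage sketch (not part of the upstream header): zeroing a sector range
 * while keeping the blocks allocated, e.g. so that later writes cannot hit
 * ENOSPC on thinly provisioned storage. example_zero_range() is illustrative.
 */
static inline int example_zero_range(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	/* BLKDEV_ZERO_NOUNMAP: write zeroes, do not deallocate */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}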
static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}
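/*
 * Usage sketch (not part of the upstream header): clamping the size of a
 * single I/O to what the queue advertises. example_clamp_io_bytes() is
 * illustrative.
 */
static inline unsigned int example_clamp_io_bytes(struct block_device *bdev,
		unsigned int bytes)
{
	/* anything larger would have to be split by the block layer */
	return min(bytes, queue_max_bytes(bdev_get_queue(bdev)));
}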
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}
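/*
 * Usage sketch (not part of the upstream header): picking an I/O size from
 * the hints above, as a filesystem might at mount time.
 * example_preferred_io_bytes() is illustrative.
 */
static inline unsigned int example_preferred_io_bytes(struct block_device *bdev)
{
	unsigned int opt = bdev_io_opt(bdev);

	/* io_opt is optional; fall back to the minimum I/O granularity */
	return opt ? opt : bdev_io_min(bdev);
}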
static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}
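/*
 * Usage sketch (not part of the upstream header): probing for discard
 * support before issuing one, since bdev_max_discard_sectors() returns 0
 * for devices that cannot discard. example_try_discard() is illustrative.
 */
static inline int example_try_discard(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
}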
static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  blk_opf_t op)
{
	if (!bdev_is_zoned(bdev))
		return false;

	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}
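/*
 * Usage sketch (not part of the upstream header): computing the start
 * sector of the zone containing @sector. This assumes the power-of-two
 * zone size that the block layer enforces for zoned devices, so a mask is
 * enough. example_zone_start() is illustrative.
 */
static inline sector_t example_zone_start(struct block_device *bdev,
		sector_t sector)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	/* non-zoned devices report a zone size of 0 */
	return zone_sectors ? sector & ~(zone_sectors - 1) : sector;
}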
static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};
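/*
 * Usage sketch (not part of the upstream header): validating a kernel
 * buffer against the DMA alignment rules checked by blk_rq_aligned()
 * above. example_buffer_aligned() is illustrative.
 */
static inline bool example_buffer_aligned(struct request_queue *q,
		void *buf, unsigned int len)
{
	/* both the address and the length must satisfy the alignment mask */
	return blk_rq_aligned(q, (unsigned long)buf, len);
}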
#define NFL4_UFLG_MASK			0x0000003F

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is called with swap_lock and sometimes the page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
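/*
 * Usage sketch (not part of the upstream header): the minimal ops table a
 * bio-based driver might hang off its gendisk. example_fops and
 * example_submit_bio() are illustrative and belong in a driver's .c file;
 * THIS_MODULE assumes <linux/export.h> is visible there. A real driver
 * would queue the bio to hardware instead of completing it immediately.
 */
static void example_submit_bio(struct bio *bio)
{
	bio_endio(bio);		/* complete immediately, like a null device */
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= example_submit_bio,
};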
static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned long start_time);

void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
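/*
 * Usage sketch (not part of the upstream header): I/O accounting in a
 * bio-based driver so the device shows up correctly in diskstats.
 * example_account_bio() is illustrative; a real driver would call
 * bio_end_io_acct() from its completion path rather than inline.
 */
static inline void example_account_bio(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... hand the bio to hardware and wait for completion ... */

	bio_end_io_acct(bio, start);
}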
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */