/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of
	 * nr_zones bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}
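/*
 * Illustrative sketch (not part of the original header): a caller that needs
 * a stable openers count takes disk->open_mutex around the check, e.g. before
 * allowing a destructive operation.  The example_ name is hypothetical.
 */
#if 0
static bool example_disk_is_unused(struct gendisk *disk)
{
	bool unused;

	mutex_lock(&disk->open_mutex);
	unused = disk_openers(disk) == 0;	/* stable under open_mutex */
	mutex_unlock(&disk->open_mutex);
	return unused;
}
#endif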
/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
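/*
 * Illustrative sketch (not from the original header): validating a
 * user-supplied block size before applying it, e.g. in a loop-device style
 * ioctl handler.  The example_ function name is hypothetical.
 */
#if 0
static int example_set_block_size(struct gendisk *disk, unsigned long bsize)
{
	/* accepts 512..PAGE_SIZE, power of two only */
	int err = blk_validate_block_size(bsize);

	if (err)
		return err;
	blk_queue_logical_block_size(disk->queue, bsize);
	return 0;
}
#endif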
/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	bool			zoned;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, bool zoned);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
		void (*update_driver_data)(struct gendisk *disk));

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};
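/*
 * Illustrative sketch (not from the original header): counting open zones
 * with blkdev_report_zones().  The callback runs once per reported zone;
 * the example_ names are hypothetical.
 */
#if 0
static int example_count_open_cb(struct blk_zone *zone, unsigned int idx,
				 void *data)
{
	unsigned int *open = data;

	if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
	    zone->cond == BLK_ZONE_COND_EXP_OPEN)
		(*open)++;
	return 0;
}

static int example_count_open_zones(struct block_device *bdev,
				    unsigned int *open)
{
	*open = 0;
	/* returns the number of zones reported, or a negative errno */
	return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				   example_count_open_cb, open);
}
#endif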
struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	struct elevator_queue	*elevator;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	unsigned int		rq_timeout;

	unsigned int		queue_depth;

	refcount_t		refs;

	/* hw dispatch queues */
	unsigned int		nr_hw_queues;
	struct xarray		hctx_table;

	struct percpu_ref	q_usage_counter;

	struct request		*last_merge;

	spinlock_t		queue_lock;

	int			quiesce_depth;

	struct gendisk		*disk;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

	struct queue_limits	limits;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;
	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	unsigned int		dma_pad_mask;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	unsigned int		required_elevator_features;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	int			node;

	spinlock_t		requeue_lock;
	struct list_head	requeue_list;
	struct delayed_work	requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_HW_WC	13	/* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skip the queue */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
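/*
 * Illustrative sketch (not from the original header): a driver marking its
 * queue as non-rotational and opting out of entropy contribution, as many
 * SSD-backed drivers do at probe time.  The example_ name is hypothetical.
 */
#if 0
static void example_mark_ssd(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
}
#endif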
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
}

#ifdef CONFIG_BLK_DEV_ZONED
unsigned int bdev_nr_zones(struct block_device *bdev);

static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
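/*
 * Illustrative sketch (not from the original header): since zones are
 * chunk_sectors long and chunk_sectors is a power of two on zoned devices,
 * disk_zone_no() reduces to a shift, and the inverse gives the zone's start
 * sector.  The example_ name is hypothetical.
 */
#if 0
static sector_t example_zone_start(struct gendisk *disk, unsigned int zno)
{
	/* valid only for zoned devices, where chunk_sectors is non-zero */
	return (sector_t)zno << ilog2(disk->queue->limits.chunk_sectors);
}
#endif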
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);
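/*
 * Illustrative sketch (not from the original header): typical BIO-driver
 * bring-up order, with add_disk() error handling.  struct example_dev,
 * example_fops and the example_ names are hypothetical.
 */
#if 0
static int example_register(struct example_dev *dev)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
	int err;

	if (!disk)
		return -ENOMEM;
	snprintf(disk->disk_name, DISK_NAME_LEN, "example0");
	disk->fops = &example_fops;
	disk->private_data = dev;
	set_capacity(disk, dev->nr_sectors);

	err = add_disk(disk);		/* __must_check */
	if (err)
		put_disk(disk);		/* drops the part0 reference */
	return err;
}
#endif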
static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}
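/*
 * Illustrative sketch (not from the original header): capping an I/O so it
 * does not cross a chunk (e.g. zone or stripe) boundary.  The example_ name
 * is hypothetical.
 */
#if 0
static unsigned int example_cap_to_chunk(struct request_queue *q,
		sector_t start, unsigned int nr_sects)
{
	unsigned int chunk_sectors = q->limits.chunk_sectors;

	if (!chunk_sectors)
		return nr_sects;	/* no chunking configured */
	return min(nr_sects, blk_chunk_sectors_left(start, chunk_sectors));
}
#endif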
/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);
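/*
 * Illustrative sketch (not from the original header): a driver describing its
 * hardware limits at probe time.  The 4096/65535/128 values are made-up
 * device properties; the example_ name is hypothetical.
 */
#if 0
static void example_set_limits(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 65535);	/* per-command transfer cap */
	blk_queue_max_segments(q, 128);		/* scatter/gather entries */
	blk_queue_write_cache(q, true, true);	/* volatile cache + FUA */
}
#endif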
#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list;	/* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list;	/* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}
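/*
 * Illustrative sketch (not from the original header): batching a burst of
 * bios under one plug so the block layer can merge and dispatch them
 * together.  The example_ name is hypothetical.
 */
#if 0
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio_noacct(bios[i]);
	blk_finish_plug(&plug);	/* flushes the plugged requests */
}
#endif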
int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}
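/*
 * Illustrative sketch (not from the original header): zeroing a sector range,
 * first trying hardware offload only (no explicit zero writes), then falling
 * back to writing zeroes.  The example_ name is hypothetical.
 */
#if 0
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	int err;

	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				   BLKDEV_ZERO_NOFALLBACK);
	if (err == -EOPNOTSUPP)	/* no offload: write zeroes the slow way */
		err = blkdev_issue_zeroout(bdev, sector, nr_sects,
					   GFP_KERNEL, 0);
	return err;
}
#endif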
1060af2c68feSBart Van Assche static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1061ae03bf63SMartin K. Petersen {
1062025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
1063ae03bf63SMartin K. Petersen }
1064ae03bf63SMartin K. Petersen 
1065af2c68feSBart Van Assche static inline unsigned long queue_virt_boundary(const struct request_queue *q)
106603100aadSKeith Busch {
106703100aadSKeith Busch 	return q->limits.virt_boundary_mask;
106803100aadSKeith Busch }
106903100aadSKeith Busch 
1070af2c68feSBart Van Assche static inline unsigned int queue_max_sectors(const struct request_queue *q)
1071ae03bf63SMartin K. Petersen {
1072025146e1SMartin K. Petersen 	return q->limits.max_sectors;
1073ae03bf63SMartin K. Petersen }
1074ae03bf63SMartin K. Petersen 
1075547e2f70SChristoph Hellwig static inline unsigned int queue_max_bytes(struct request_queue *q)
1076547e2f70SChristoph Hellwig {
1077547e2f70SChristoph Hellwig 	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
1078547e2f70SChristoph Hellwig }
1079547e2f70SChristoph Hellwig 
1080af2c68feSBart Van Assche static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1081ae03bf63SMartin K. Petersen {
1082025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
1083ae03bf63SMartin K. Petersen }
1084ae03bf63SMartin K. Petersen 
1085af2c68feSBart Van Assche static inline unsigned short queue_max_segments(const struct request_queue *q)
1086ae03bf63SMartin K. Petersen {
10878a78362cSMartin K. Petersen 	return q->limits.max_segments;
1088ae03bf63SMartin K. Petersen }
1089ae03bf63SMartin K. Petersen 
1090af2c68feSBart Van Assche static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
10911e739730SChristoph Hellwig {
10921e739730SChristoph Hellwig 	return q->limits.max_discard_segments;
10931e739730SChristoph Hellwig }
10941e739730SChristoph Hellwig 
1095af2c68feSBart Van Assche static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1096ae03bf63SMartin K. Petersen {
1097025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
1098ae03bf63SMartin K. Petersen }
1099ae03bf63SMartin K. Petersen 
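/*
 * Editor's note: queue_max_bytes() above converts the sector limit to
 * bytes while clamping the result below INT_MAX. With max_sectors at the
 * default BLK_DEF_MAX_SECTORS of 2560 512-byte sectors:
 *
 *	min(2560, INT_MAX >> 9) << 9 == 2560 << 9 == 1310720 bytes (1.25 MiB)
 */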
11000512a75bSKeith Busch static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
11010512a75bSKeith Busch {
1102fe6f0cdcSJohannes Thumshirn 
1103fe6f0cdcSJohannes Thumshirn 	const struct queue_limits *l = &q->limits;
1104fe6f0cdcSJohannes Thumshirn 
1105fe6f0cdcSJohannes Thumshirn 	return min(l->max_zone_append_sectors, l->max_sectors);
11060512a75bSKeith Busch }
11070512a75bSKeith Busch 
11082aba0d19SChristoph Hellwig static inline unsigned int
11092aba0d19SChristoph Hellwig bdev_max_zone_append_sectors(struct block_device *bdev)
11102aba0d19SChristoph Hellwig {
11112aba0d19SChristoph Hellwig 	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
11122aba0d19SChristoph Hellwig }
11132aba0d19SChristoph Hellwig 
111465ea1b66SNaohiro Aota static inline unsigned int bdev_max_segments(struct block_device *bdev)
111565ea1b66SNaohiro Aota {
111665ea1b66SNaohiro Aota 	return queue_max_segments(bdev_get_queue(bdev));
111765ea1b66SNaohiro Aota }
111865ea1b66SNaohiro Aota 
1119ad6bf88aSMikulas Patocka static inline unsigned queue_logical_block_size(const struct request_queue *q)
11201da177e4SLinus Torvalds {
11211da177e4SLinus Torvalds 	int retval = 512;
11221da177e4SLinus Torvalds 
1123025146e1SMartin K. Petersen 	if (q && q->limits.logical_block_size)
1124025146e1SMartin K. Petersen 		retval = q->limits.logical_block_size;
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds 	return retval;
11271da177e4SLinus Torvalds }
11281da177e4SLinus Torvalds 
1129ad6bf88aSMikulas Patocka static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
11301da177e4SLinus Torvalds {
1131e1defc4fSMartin K. Petersen 	return queue_logical_block_size(bdev_get_queue(bdev));
11321da177e4SLinus Torvalds }
11331da177e4SLinus Torvalds 
1134af2c68feSBart Van Assche static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1135c72758f3SMartin K. Petersen {
1136c72758f3SMartin K. Petersen 	return q->limits.physical_block_size;
1137c72758f3SMartin K. Petersen }
1138c72758f3SMartin K. Petersen 
1139892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1140ac481c20SMartin K. Petersen {
1141ac481c20SMartin K. Petersen 	return queue_physical_block_size(bdev_get_queue(bdev));
1142ac481c20SMartin K. Petersen }
1143ac481c20SMartin K. Petersen 
1144af2c68feSBart Van Assche static inline unsigned int queue_io_min(const struct request_queue *q)
1145c72758f3SMartin K. Petersen {
1146c72758f3SMartin K. Petersen 	return q->limits.io_min;
1147c72758f3SMartin K. Petersen }
1148c72758f3SMartin K. Petersen 
1149ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev)
1150ac481c20SMartin K. Petersen {
1151ac481c20SMartin K. Petersen 	return queue_io_min(bdev_get_queue(bdev));
1152ac481c20SMartin K. Petersen }
1153ac481c20SMartin K. Petersen 
1154af2c68feSBart Van Assche static inline unsigned int queue_io_opt(const struct request_queue *q)
1155c72758f3SMartin K. Petersen {
1156c72758f3SMartin K. Petersen 	return q->limits.io_opt;
1157c72758f3SMartin K. Petersen }
1158c72758f3SMartin K. Petersen 
1159ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev)
1160ac481c20SMartin K. Petersen {
1161ac481c20SMartin K. Petersen 	return queue_io_opt(bdev_get_queue(bdev));
1162ac481c20SMartin K. Petersen }
1163ac481c20SMartin K. Petersen 
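/*
 * Editor's sketch: deriving direct-I/O geometry from the limits above.
 * The fallback of 64 logical blocks is made up for illustration.
 *
 *	unsigned int lbs = bdev_logical_block_size(bdev);
 *	unsigned int opt = bdev_io_opt(bdev);
 *	unsigned int len = opt ? opt : 64 * lbs;
 *
 * io_min is the smallest I/O the device handles without a penalty such
 * as read-modify-write; io_opt, when nonzero, is the preferred I/O
 * width and is normally a multiple of the logical block size.
 */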
1164a805a4faSDamien Le Moal static inline unsigned int
1165a805a4faSDamien Le Moal queue_zone_write_granularity(const struct request_queue *q)
1166a805a4faSDamien Le Moal {
1167a805a4faSDamien Le Moal 	return q->limits.zone_write_granularity;
1168a805a4faSDamien Le Moal }
1169a805a4faSDamien Le Moal 
1170a805a4faSDamien Le Moal static inline unsigned int
1171a805a4faSDamien Le Moal bdev_zone_write_granularity(struct block_device *bdev)
1172a805a4faSDamien Le Moal {
1173a805a4faSDamien Le Moal 	return queue_zone_write_granularity(bdev_get_queue(bdev));
1174a805a4faSDamien Le Moal }
1175a805a4faSDamien Le Moal 
117689098b07SChristoph Hellwig int bdev_alignment_offset(struct block_device *bdev);
11775c4b4a5cSChristoph Hellwig unsigned int bdev_discard_alignment(struct block_device *bdev);
1178c6e66634SPaolo Bonzini 
1179cf0fbf89SChristoph Hellwig static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
1180cf0fbf89SChristoph Hellwig {
1181cf0fbf89SChristoph Hellwig 	return bdev_get_queue(bdev)->limits.max_discard_sectors;
1182cf0fbf89SChristoph Hellwig }
1183cf0fbf89SChristoph Hellwig 
11847b47ef52SChristoph Hellwig static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
11857b47ef52SChristoph Hellwig {
11867b47ef52SChristoph Hellwig 	return bdev_get_queue(bdev)->limits.discard_granularity;
11877b47ef52SChristoph Hellwig }
11887b47ef52SChristoph Hellwig 
118944abff2cSChristoph Hellwig static inline unsigned int
119044abff2cSChristoph Hellwig bdev_max_secure_erase_sectors(struct block_device *bdev)
119144abff2cSChristoph Hellwig {
119244abff2cSChristoph Hellwig 	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
119344abff2cSChristoph Hellwig }
119444abff2cSChristoph Hellwig 
1195a6f0788eSChaitanya Kulkarni static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1196a6f0788eSChaitanya Kulkarni {
1197a6f0788eSChaitanya Kulkarni 	struct request_queue *q = bdev_get_queue(bdev);
1198a6f0788eSChaitanya Kulkarni 
1199a6f0788eSChaitanya Kulkarni 	if (q)
1200a6f0788eSChaitanya Kulkarni 		return q->limits.max_write_zeroes_sectors;
1201a6f0788eSChaitanya Kulkarni 
1202a6f0788eSChaitanya Kulkarni 	return 0;
1203a6f0788eSChaitanya Kulkarni }
1204a6f0788eSChaitanya Kulkarni 
120510f0d2a5SChristoph Hellwig static inline bool bdev_nonrot(struct block_device *bdev)
120610f0d2a5SChristoph Hellwig {
120710f0d2a5SChristoph Hellwig 	return blk_queue_nonrot(bdev_get_queue(bdev));
120810f0d2a5SChristoph Hellwig }
120910f0d2a5SChristoph Hellwig 
12103222d8c2SChristoph Hellwig static inline bool bdev_synchronous(struct block_device *bdev)
12113222d8c2SChristoph Hellwig {
12123222d8c2SChristoph Hellwig 	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
12133222d8c2SChristoph Hellwig 			&bdev_get_queue(bdev)->queue_flags);
12143222d8c2SChristoph Hellwig }
12153222d8c2SChristoph Hellwig 
121636d25489SChristoph Hellwig static inline bool bdev_stable_writes(struct block_device *bdev)
121736d25489SChristoph Hellwig {
121836d25489SChristoph Hellwig 	return test_bit(QUEUE_FLAG_STABLE_WRITES,
121936d25489SChristoph Hellwig 			&bdev_get_queue(bdev)->queue_flags);
122036d25489SChristoph Hellwig }
122136d25489SChristoph Hellwig 
122208e688fdSChristoph Hellwig static inline bool bdev_write_cache(struct block_device *bdev)
122308e688fdSChristoph Hellwig {
122408e688fdSChristoph Hellwig 	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
122508e688fdSChristoph Hellwig }
122608e688fdSChristoph Hellwig 
1227a557e82eSChristoph Hellwig static inline bool bdev_fua(struct block_device *bdev)
1228a557e82eSChristoph Hellwig {
1229a557e82eSChristoph Hellwig 	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
1230a557e82eSChristoph Hellwig }
1231a557e82eSChristoph Hellwig 
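/*
 * Editor's sketch: a simplified durability check built on the two flags
 * above. Real submitters tag the bio with REQ_PREFLUSH/REQ_FUA instead,
 * but the idea is the same.
 *
 *	if (bdev_write_cache(bdev) && !bdev_fua(bdev))
 *		err = blkdev_issue_flush(bdev);
 *
 * No volatile write cache means completed writes are already durable;
 * FUA lets individual writes bypass the cache.
 */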
1232568ec936SChristoph Hellwig static inline bool bdev_nowait(struct block_device *bdev)
1233568ec936SChristoph Hellwig {
1234568ec936SChristoph Hellwig 	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
1235568ec936SChristoph Hellwig }
1236568ec936SChristoph Hellwig 
1237797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev)
1238797476b8SDamien Le Moal {
1239fea127b3SPankaj Raghav 	return blk_queue_is_zoned(bdev_get_queue(bdev));
1240797476b8SDamien Le Moal }
1241797476b8SDamien Le Moal 
1242d67ea690SPankaj Raghav static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
1243d67ea690SPankaj Raghav {
1244d67ea690SPankaj Raghav 	return disk_zone_no(bdev->bd_disk, sec);
1245d67ea690SPankaj Raghav }
1246d67ea690SPankaj Raghav 
1247a3707982SBart Van Assche /* Whether write serialization is required for @op on zoned devices. */
1248a3707982SBart Van Assche static inline bool op_needs_zoned_write_locking(enum req_op op)
12498cafdb5aSPankaj Raghav {
12508cafdb5aSPankaj Raghav 	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
12518cafdb5aSPankaj Raghav }
12528cafdb5aSPankaj Raghav 
1253de71973cSChristoph Hellwig static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
12543ddbe2a7SBart Van Assche 					  enum req_op op)
1255de71973cSChristoph Hellwig {
1256a3707982SBart Van Assche 	return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
1257482eb689SPete Wyckoff }
12581da177e4SLinus Torvalds 
12591da177e4SLinus Torvalds static inline sector_t bdev_zone_sectors(struct block_device *bdev)
12604a2dcc35SKeith Busch {
12614a2dcc35SKeith Busch 	struct request_queue *q = bdev_get_queue(bdev);
12624a2dcc35SKeith Busch 
12634a2dcc35SKeith Busch 	if (!blk_queue_is_zoned(q))
12644a2dcc35SKeith Busch 		return 0;
12655debd969SKeith Busch 	return q->limits.chunk_sectors;
12665debd969SKeith Busch }
12675debd969SKeith Busch 
1268e29b2100SPankaj Raghav static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
1269e29b2100SPankaj Raghav 						   sector_t sector)
1270e29b2100SPankaj Raghav {
1271e29b2100SPankaj Raghav 	return sector & (bdev_zone_sectors(bdev) - 1);
1272e29b2100SPankaj Raghav }
1273e29b2100SPankaj Raghav 
1274e29b2100SPankaj Raghav static inline bool bdev_is_zone_start(struct block_device *bdev,
1275e29b2100SPankaj Raghav 				      sector_t sector)
1276e29b2100SPankaj Raghav {
1277e29b2100SPankaj Raghav 	return bdev_offset_from_zone_start(bdev, sector) == 0;
1278e29b2100SPankaj Raghav }
1279e29b2100SPankaj Raghav 
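/*
 * Editor's sketch: locating a sector within its zone using the helpers
 * above. The zone size is a power of two, which is why
 * bdev_offset_from_zone_start() can reduce to a mask.
 *
 *	if (bdev_is_zoned(bdev)) {
 *		unsigned int zno = bdev_zone_no(bdev, sector);
 *		sector_t off = bdev_offset_from_zone_start(bdev, sector);
 *		bool is_start = bdev_is_zone_start(bdev, sector);
 *	}
 */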
12801da177e4SLinus Torvalds static inline int queue_dma_alignment(const struct request_queue *q)
12811da177e4SLinus Torvalds {
1282c964d62fSKeith Busch 	return q ? q->limits.dma_alignment : 511;
12831da177e4SLinus Torvalds }
12841da177e4SLinus Torvalds 
128514417799SNamhyung Kim static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
128687904074SFUJITA Tomonori {
128787904074SFUJITA Tomonori 	return queue_dma_alignment(bdev_get_queue(bdev));
128887904074SFUJITA Tomonori }
128914417799SNamhyung Kim 
12905debd969SKeith Busch static inline bool bdev_iter_is_aligned(struct block_device *bdev,
12915debd969SKeith Busch 					struct iov_iter *iter)
12925debd969SKeith Busch {
12935debd969SKeith Busch 	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
12945debd969SKeith Busch 				   bdev_logical_block_size(bdev) - 1);
12955debd969SKeith Busch }
12965debd969SKeith Busch 
129787904074SFUJITA Tomonori static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
129887904074SFUJITA Tomonori 				 unsigned int len)
129987904074SFUJITA Tomonori {
130087904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
130187904074SFUJITA Tomonori 	return !(addr & alignment) && !(len & alignment);
130287904074SFUJITA Tomonori }
130387904074SFUJITA Tomonori 
13041da177e4SLinus Torvalds /* assumes size > 256 */
13051da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
13061da177e4SLinus Torvalds {
1307adff2158SDawei Li 	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
13081da177e4SLinus Torvalds }
13091da177e4SLinus Torvalds 
13102befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev)
13111da177e4SLinus Torvalds {
13126b7b181bSChristoph Hellwig 	return 1 << bdev->bd_inode->i_blkbits;
13131da177e4SLinus Torvalds }
13141da177e4SLinus Torvalds 
131559c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work);
1316818cd1cbSJens Axboe int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
13171da177e4SLinus Torvalds 
13181da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
13191da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
13201da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
13211da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
13221da177e4SLinus Torvalds 
1323d145dc23SSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1324d145dc23SSatya Tangirala 
1325cb77cb5aSEric Biggers bool blk_crypto_register(struct blk_crypto_profile *profile,
1326cb77cb5aSEric Biggers 			 struct request_queue *q);
1327d145dc23SSatya Tangirala 
1328d145dc23SSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1329d145dc23SSatya Tangirala 
1330cb77cb5aSEric Biggers static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
1331d145dc23SSatya Tangirala 				       struct request_queue *q)
1332d145dc23SSatya Tangirala {
1333d145dc23SSatya Tangirala 	return true;
1334d145dc23SSatya Tangirala }
1335d145dc23SSatya Tangirala 
1336d145dc23SSatya Tangirala #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1337d145dc23SSatya Tangirala 
13389208d414SChristoph Hellwig enum blk_unique_id {
13399208d414SChristoph Hellwig 	/* these match the Designator Types specified in SPC */
13409208d414SChristoph Hellwig 	BLK_UID_T10	= 1,
13419208d414SChristoph Hellwig 	BLK_UID_EUI64	= 2,
13429208d414SChristoph Hellwig 	BLK_UID_NAA	= 3,
13439208d414SChristoph Hellwig };
13449208d414SChristoph Hellwig 
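/*
 * Editor's sketch: validating a raw user buffer for direct I/O, mirroring
 * the bdev_iter_is_aligned()/blk_rq_aligned() tests above. 'addr' and
 * 'len' are hypothetical.
 *
 *	unsigned int mask = bdev_dma_alignment(bdev) |
 *			    (bdev_logical_block_size(bdev) - 1);
 *
 *	if ((addr | len) & mask)
 *		return -EINVAL;
 */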
134508f85851SAl Viro struct block_device_operations {
13463e08773cSChristoph Hellwig 	void (*submit_bio)(struct bio *bio);
134769fe0f29SMing Lei 	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
134869fe0f29SMing Lei 			unsigned int flags);
134905bdb996SChristoph Hellwig 	int (*open)(struct gendisk *disk, blk_mode_t mode);
1350ae220766SChristoph Hellwig 	void (*release)(struct gendisk *disk);
135105bdb996SChristoph Hellwig 	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
135205bdb996SChristoph Hellwig 			unsigned cmd, unsigned long arg);
135305bdb996SChristoph Hellwig 	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
135405bdb996SChristoph Hellwig 			unsigned cmd, unsigned long arg);
135577ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
135677ea887eSTejun Heo 				      unsigned int clearing);
1357c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
135808f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1359e00adcadSChristoph Hellwig 	int (*set_read_only)(struct block_device *bdev, bool ro);
136076792055SChristoph Hellwig 	void (*free_disk)(struct gendisk *disk);
1361b3a27d05SNitin Gupta 	/* this callback is with swap_lock and sometimes page table lock held */
1362b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1363e76239a3SChristoph Hellwig 	int (*report_zones)(struct gendisk *, sector_t sector,
1364d4100351SChristoph Hellwig 			unsigned int nr_zones, report_zones_cb cb, void *data);
1365050a4f34SJens Axboe 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
13669208d414SChristoph Hellwig 	/* returns the length of the identifier or a negative errno: */
13679208d414SChristoph Hellwig 	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
13689208d414SChristoph Hellwig 			enum blk_unique_id id_type);
136908f85851SAl Viro 	struct module *owner;
1370bbd3e064SChristoph Hellwig 	const struct pr_ops *pr_ops;
13710bdfbca8SDmitry Osipenko 
13720bdfbca8SDmitry Osipenko 	/*
13730bdfbca8SDmitry Osipenko 	 * Special callback for probing GPT entry at a given sector.
13740bdfbca8SDmitry Osipenko 	 * Needed by Android devices, used by GPT scanner and MMC blk
13750bdfbca8SDmitry Osipenko 	 * driver.
13760bdfbca8SDmitry Osipenko 	 */
13770bdfbca8SDmitry Osipenko 	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
137808f85851SAl Viro };
137908f85851SAl Viro 
1380ee6a129dSArnd Bergmann #ifdef CONFIG_COMPAT
138105bdb996SChristoph Hellwig extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
1382ee6a129dSArnd Bergmann 				   unsigned int, unsigned long);
1383ee6a129dSArnd Bergmann #else
1384ee6a129dSArnd Bergmann #define blkdev_compat_ptr_ioctl NULL
1385ee6a129dSArnd Bergmann #endif
1386ee6a129dSArnd Bergmann 
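/*
 * Editor's sketch of a driver's operations table (all foo_* names are
 * hypothetical). A bio-based driver typically provides at least
 * submit_bio; everything else is optional.
 *
 *	static const struct block_device_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= foo_submit_bio,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.getgeo		= foo_getgeo,
 *	};
 */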
13870619317fSJens Axboe static inline void blk_wake_io_task(struct task_struct *waiter)
13880619317fSJens Axboe {
13890619317fSJens Axboe 	/*
13900619317fSJens Axboe 	 * If we're polling, the task itself is doing the completions. For
13910619317fSJens Axboe 	 * that case, we don't need to signal a wakeup, it's enough to just
13920619317fSJens Axboe 	 * mark us as RUNNING.
13930619317fSJens Axboe 	 */
13940619317fSJens Axboe 	if (waiter == current)
13950619317fSJens Axboe 		__set_current_state(TASK_RUNNING);
13960619317fSJens Axboe 	else
13970619317fSJens Axboe 		wake_up_process(waiter);
13980619317fSJens Axboe }
13990619317fSJens Axboe 
14005f275713SYu Kuai unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
14015f0614a5SMing Lei 				 unsigned long start_time);
140277e7ffd7SBart Van Assche void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
14035f275713SYu Kuai 		      unsigned int sectors, unsigned long start_time);
1404956d510eSChristoph Hellwig 
140599dfc43eSChristoph Hellwig unsigned long bio_start_io_acct(struct bio *bio);
140699dfc43eSChristoph Hellwig void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
140799dfc43eSChristoph Hellwig 		struct block_device *orig_bdev);
1408956d510eSChristoph Hellwig 
1409956d510eSChristoph Hellwig /**
1410956d510eSChristoph Hellwig  * bio_end_io_acct - end I/O accounting for bio based drivers
1411956d510eSChristoph Hellwig  * @bio: bio to end account for
1412b42c1fc3SChristoph Hellwig  * @start_time: start time returned by bio_start_io_acct()
1413956d510eSChristoph Hellwig  */
1414956d510eSChristoph Hellwig static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1415956d510eSChristoph Hellwig {
141699dfc43eSChristoph Hellwig 	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
1417956d510eSChristoph Hellwig }
1418956d510eSChristoph Hellwig 
14193f1266f1SChristoph Hellwig int bdev_read_only(struct block_device *bdev);
14203f1266f1SChristoph Hellwig int set_blocksize(struct block_device *bdev, int size);
14213f1266f1SChristoph Hellwig 
14224e7b5671SChristoph Hellwig int lookup_bdev(const char *pathname, dev_t *dev);
14233f1266f1SChristoph Hellwig 
14243f1266f1SChristoph Hellwig void blkdev_show(struct seq_file *seqf, off_t offset);
14253f1266f1SChristoph Hellwig 
14263f1266f1SChristoph Hellwig #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
14273f1266f1SChristoph Hellwig #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
14283f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
14293f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	512
14303f1266f1SChristoph Hellwig #else
14313f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	0
14321da177e4SLinus Torvalds #endif
14333f1266f1SChristoph Hellwig 
14340718afd4SChristoph Hellwig struct blk_holder_ops {
1435d8530de5SChristoph Hellwig 	void (*mark_dead)(struct block_device *bdev, bool surprise);
14362142b88cSChristoph Hellwig 
14372142b88cSChristoph Hellwig 	/*
14382142b88cSChristoph Hellwig 	 * Sync the file system mounted on the block device.
14392142b88cSChristoph Hellwig 	 */
14402142b88cSChristoph Hellwig 	void (*sync)(struct block_device *bdev);
14410718afd4SChristoph Hellwig };
14420718afd4SChristoph Hellwig 
14437ecd0b6fSChristoph Hellwig extern const struct blk_holder_ops fs_holder_ops;
14447ecd0b6fSChristoph Hellwig 
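/*
 * Editor's sketch: the usual accounting pattern for a bio-based driver,
 * bracketing the I/O with the helpers declared above.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	... carry out the I/O ...
 *
 *	bio_end_io_acct(bio, start);
 *
 * Use bio_end_io_acct_remapped() instead if the bio was remapped to a
 * different block device after accounting started.
 */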
14453f0b3e78SChristoph Hellwig /*
14463f0b3e78SChristoph Hellwig  * Return the correct open flags for blkdev_get_by_* for super block flags
14473f0b3e78SChristoph Hellwig  * as stored in sb->s_flags.
14483f0b3e78SChristoph Hellwig  */
14493f0b3e78SChristoph Hellwig #define sb_open_mode(flags) \
145005bdb996SChristoph Hellwig 	(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
14513f0b3e78SChristoph Hellwig 
1452e719b4d1SJan Kara struct bdev_handle {
1453e719b4d1SJan Kara 	struct block_device *bdev;
1454e719b4d1SJan Kara 	void *holder;
1455841dd789SJan Kara 	blk_mode_t mode;
1456e719b4d1SJan Kara };
1457e719b4d1SJan Kara 
145805bdb996SChristoph Hellwig struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
14590718afd4SChristoph Hellwig 		const struct blk_holder_ops *hops);
146005bdb996SChristoph Hellwig struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
14610718afd4SChristoph Hellwig 		void *holder, const struct blk_holder_ops *hops);
1462e719b4d1SJan Kara struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
1463e719b4d1SJan Kara 		const struct blk_holder_ops *hops);
1464e719b4d1SJan Kara struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
1465e719b4d1SJan Kara 		void *holder, const struct blk_holder_ops *hops);
14660718afd4SChristoph Hellwig int bd_prepare_to_claim(struct block_device *bdev, void *holder,
14670718afd4SChristoph Hellwig 		const struct blk_holder_ops *hops);
146837c3fc9aSChristoph Hellwig void bd_abort_claiming(struct block_device *bdev, void *holder);
14692736e8eeSChristoph Hellwig void blkdev_put(struct block_device *bdev, void *holder);
1470e719b4d1SJan Kara void bdev_release(struct bdev_handle *handle);
14713f1266f1SChristoph Hellwig 
147222ae8ce8SChristoph Hellwig /* just for blk-cgroup, don't use elsewhere */
147322ae8ce8SChristoph Hellwig struct block_device *blkdev_get_no_open(dev_t dev);
147422ae8ce8SChristoph Hellwig void blkdev_put_no_open(struct block_device *bdev);
147522ae8ce8SChristoph Hellwig 
1476621c1f42SChristoph Hellwig struct block_device *I_BDEV(struct inode *inode);
14773f1266f1SChristoph Hellwig 
14783f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
14793f1266f1SChristoph Hellwig void invalidate_bdev(struct block_device *bdev);
14803f1266f1SChristoph Hellwig int sync_blockdev(struct block_device *bdev);
148197d6fb1bSYuezhang Mo int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
148270164eb6SChristoph Hellwig int sync_blockdev_nowait(struct block_device *bdev);
14831e03a36bSChristoph Hellwig void sync_bdevs(bool wait);
14842d985f8cSEric Biggers void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
1485322cbb50SChristoph Hellwig void printk_all_partitions(void);
14862577f53fSChristoph Hellwig int __init early_lookup_bdev(const char *pathname, dev_t *dev);
14873f1266f1SChristoph Hellwig #else
14883f1266f1SChristoph Hellwig static inline void invalidate_bdev(struct block_device *bdev)
14893f1266f1SChristoph Hellwig {
14903f1266f1SChristoph Hellwig }
14913f1266f1SChristoph Hellwig static inline int sync_blockdev(struct block_device *bdev)
14923f1266f1SChristoph Hellwig {
14933f1266f1SChristoph Hellwig 	return 0;
14943f1266f1SChristoph Hellwig }
149570164eb6SChristoph Hellwig static inline int sync_blockdev_nowait(struct block_device *bdev)
149670164eb6SChristoph Hellwig {
149770164eb6SChristoph Hellwig 	return 0;
149870164eb6SChristoph Hellwig }
14991e03a36bSChristoph Hellwig static inline void sync_bdevs(bool wait)
15001e03a36bSChristoph Hellwig {
15011e03a36bSChristoph Hellwig }
15022d985f8cSEric Biggers static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
15032d985f8cSEric Biggers {
15042d985f8cSEric Biggers }
1505322cbb50SChristoph Hellwig static inline void printk_all_partitions(void)
1506322cbb50SChristoph Hellwig {
1507322cbb50SChristoph Hellwig }
1508cf056a43SChristoph Hellwig static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
1509cf056a43SChristoph Hellwig {
1510cf056a43SChristoph Hellwig 	return -EINVAL;
1511cf056a43SChristoph Hellwig }
1512322cbb50SChristoph Hellwig #endif /* CONFIG_BLOCK */
1513322cbb50SChristoph Hellwig 
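/*
 * Editor's sketch: opening a block device by path through the handle API
 * declared above. The path is hypothetical; 'holder' would be the
 * caller's owning object, and filesystems can pass fs_holder_ops.
 *
 *	struct bdev_handle *h;
 *
 *	h = bdev_open_by_path("/dev/vda", BLK_OPEN_READ | BLK_OPEN_WRITE,
 *			      holder, NULL);
 *	if (IS_ERR(h))
 *		return PTR_ERR(h);
 *	...
 *	bdev_release(h);
 */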
1514040f04bdSChristoph Hellwig int freeze_bdev(struct block_device *bdev);
1515040f04bdSChristoph Hellwig int thaw_bdev(struct block_device *bdev);
15163f1266f1SChristoph Hellwig 
15175a72e899SJens Axboe struct io_comp_batch {
15185a72e899SJens Axboe 	struct request *req_list;
15195a72e899SJens Axboe 	bool need_ts;
15205a72e899SJens Axboe 	void (*complete)(struct io_comp_batch *);
15215a72e899SJens Axboe };
15225a72e899SJens Axboe 
15235a72e899SJens Axboe #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
15245a72e899SJens Axboe 
15253f1266f1SChristoph Hellwig #endif /* _LINUX_BLKDEV_H */