/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char				flags;
	enum blk_integrity_checksum		csum_type;
	unsigned char				tuple_size;
	unsigned char				pi_offset;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};
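
/*
 * Illustrative sketch (not lifted from any particular driver) of how the
 * capability and event flags above are typically wired up by a
 * removable-media driver before add_disk():
 *
 *	disk->flags |= GENHD_FL_REMOVABLE;
 *	disk->events = DISK_EVENT_MEDIA_CHANGE;
 *	disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
 */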

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		zone_capacity;
	unsigned int		last_zone_capacity;
	unsigned long		*conv_zones_bitmap;
	unsigned int		zone_wplugs_hash_bits;
	spinlock_t		zone_wplugs_lock;
	struct mempool_s	*zone_wplugs_pool;
	struct hlist_head	*zone_wplugs_hash;
	struct list_head	zone_wplugs_err_list;
	struct work_struct	zone_wplugs_work;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))

/* bounce all highmem pages */
#define BLK_FEAT_BOUNCE_HIGH		((__force blk_features_t)(1u << 14))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
	((__force blk_features_t)(1u << 15))

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))

struct queue_limits {
	blk_features_t		features;
	blk_flags_t		flags;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_user_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	/* atomic write limits */
	unsigned int		atomic_write_hw_max;
	unsigned int		atomic_write_max_sectors;
	unsigned int		atomic_write_hw_boundary;
	unsigned int		atomic_write_boundary_sectors;
	unsigned int		atomic_write_hw_unit_min;
	unsigned int		atomic_write_unit_min;
	unsigned int		atomic_write_hw_unit_max;
	unsigned int		atomic_write_unit_max;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned int		max_open_zones;
	unsigned int		max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
	unsigned int		dma_pad_mask;

	struct blk_integrity	integrity;
};
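
/*
 * A minimal sketch of a driver filling in queue_limits before registering a
 * disk; the specific values are made up for illustration. Fields left at
 * zero are filled in with defaults when the limits are validated by
 * blk_validate_limits():
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 1024,
 *		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
 *	};
 */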

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	struct elevator_queue	*elevator;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	unsigned int		rq_timeout;

	unsigned int		queue_depth;

	refcount_t		refs;

	/* hw dispatch queues */
	unsigned int		nr_hw_queues;
	struct xarray		hctx_table;

	struct percpu_ref	q_usage_counter;

	struct request		*last_merge;

	spinlock_t		queue_lock;

	int			quiesce_depth;

	struct gendisk		*disk;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

	struct queue_limits	limits;

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject		*crypto_kobject;
#endif

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	int			node;

	spinlock_t		requeue_lock;
	struct list_head	requeue_list;
	struct delayed_work	requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;
	struct mutex		limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
	QUEUE_FLAG_DYING,		/* queue being torn down */
	QUEUE_FLAG_NOMERGES,		/* disable merge attempts */
	QUEUE_FLAG_SAME_COMP,		/* complete on same CPU-group */
	QUEUE_FLAG_FAIL_IO,		/* fake timeout */
	QUEUE_FLAG_NOXMERGES,		/* No extended merges */
	QUEUE_FLAG_SAME_FORCE,		/* force complete on same CPU */
	QUEUE_FLAG_INIT_DONE,		/* queue is initialized */
	QUEUE_FLAG_STATS,		/* track IO start and completion times */
	QUEUE_FLAG_REGISTERED,		/* queue has been registered to a disk */
	QUEUE_FLAG_QUIESCED,		/* queue has been quiesced */
	QUEUE_FLAG_RQ_ALLOC_TIME,	/* record rq->alloc_time_ns */
	QUEUE_FLAG_HCTX_ACTIVE,		/* at least one blk-mq hctx is active */
	QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
	QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return disk->nr_zones;
}
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return disk_nr_zones(bdev->bd_disk);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}
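
/*
 * Worked example for disk_zone_no() above (a sketch): with
 * chunk_sectors = 524288 (2^19, i.e. a 256 MiB zone size), sector
 * 1048576 maps to zone 1048576 >> 19 = 2.
 */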

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups,
				 struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline int get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(lim, node_id, &__key);				\
})
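
/*
 * Typical BIO-based driver bring-up using blk_alloc_disk() (a sketch;
 * error unwinding is trimmed and "my_fops" is a placeholder):
 *
 *	struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 *
 * Teardown is del_gendisk() followed by put_disk().
 */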

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}
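
/*
 * For example (power-of-2 case): with boundary_sectors = 64 and offset = 70,
 * 64 - (70 & 63) = 58 sectors remain before the next boundary.
 */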

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q: queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context. The caller must have frozen the queue or ensured
 * that there is outstanding I/O by other means.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q: queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}
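
/*
 * A minimal sketch of the start/modify/commit protocol above; the field
 * being changed is just an example:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_user_sectors = new_max;
 *	err = queue_limits_commit_update(q, &lim);
 *
 * If an error occurs before committing, queue_limits_cancel_update() must
 * be called instead to release the lock.
 */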

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported. They are racy and the driver needs to
 * cope with that. Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
	q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	u64 cur_ktime;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
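
/*
 * Typical submitter-side plugging pattern (a sketch; submit_bio() is
 * declared in <linux/bio.h>):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... build and submit_bio() a batch of bios ...
 *	blk_finish_plug(&plug);
 */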
1052c6bf3f0eSChristoph Hellwig int blkdev_issue_flush(struct block_device *bdev);
10531a4dcfa8SChristoph Hellwig long nr_blockdev_pages(void);
10541a4dcfa8SChristoph Hellwig #else /* CONFIG_BLOCK */
10551a4dcfa8SChristoph Hellwig struct blk_plug {
10561a4dcfa8SChristoph Hellwig };
10571a4dcfa8SChristoph Hellwig
105847c122e3SJens Axboe static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
105947c122e3SJens Axboe 					 unsigned short nr_ios)
106047c122e3SJens Axboe {
106147c122e3SJens Axboe }
106247c122e3SJens Axboe
10631a4dcfa8SChristoph Hellwig static inline void blk_start_plug(struct blk_plug *plug)
10641a4dcfa8SChristoph Hellwig {
10651a4dcfa8SChristoph Hellwig }
10661a4dcfa8SChristoph Hellwig
10671a4dcfa8SChristoph Hellwig static inline void blk_finish_plug(struct blk_plug *plug)
10681a4dcfa8SChristoph Hellwig {
10691a4dcfa8SChristoph Hellwig }
10701a4dcfa8SChristoph Hellwig
1071008f75a2SChristoph Hellwig static inline void blk_flush_plug(struct blk_plug *plug, bool async)
10721a4dcfa8SChristoph Hellwig {
10731a4dcfa8SChristoph Hellwig }
10741a4dcfa8SChristoph Hellwig
107506b23f92SJens Axboe static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
107606b23f92SJens Axboe {
107706b23f92SJens Axboe }
107806b23f92SJens Axboe
1079c6bf3f0eSChristoph Hellwig static inline int blkdev_issue_flush(struct block_device *bdev)
10801a4dcfa8SChristoph Hellwig {
10811a4dcfa8SChristoph Hellwig 	return 0;
10821a4dcfa8SChristoph Hellwig }
10831a4dcfa8SChristoph Hellwig
10841a4dcfa8SChristoph Hellwig static inline long nr_blockdev_pages(void)
10851a4dcfa8SChristoph Hellwig {
10861a4dcfa8SChristoph Hellwig 	return 0;
10871a4dcfa8SChristoph Hellwig }
10881a4dcfa8SChristoph Hellwig #endif /* CONFIG_BLOCK */
10891a4dcfa8SChristoph Hellwig
109071ac860aSMing Lei extern void blk_io_schedule(void);
109171ac860aSMing Lei
109244abff2cSChristoph Hellwig int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
109344abff2cSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask);
109444abff2cSChristoph Hellwig int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
109544abff2cSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
109644abff2cSChristoph Hellwig int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
109744abff2cSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp);
1098ee472d83SChristoph Hellwig
1099ee472d83SChristoph Hellwig #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
1100cb365b96SChristoph Hellwig #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
1101bf86bcdbSChristoph Hellwig #define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */
1102ee472d83SChristoph Hellwig
1103e73c23ffSChaitanya Kulkarni extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1104e73c23ffSChaitanya Kulkarni 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1105ee472d83SChristoph Hellwig 		unsigned flags);
11063f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1107ee472d83SChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
1108ee472d83SChristoph Hellwig
/* note that the flags argument is not used by the body below */
11092cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block,
11102cf6d26aSChristoph Hellwig 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1111fb2dce86SDavid Woodhouse {
1112233bde21SBart Van Assche 	return blkdev_issue_discard(sb->s_bdev,
1113233bde21SBart Van Assche 				    block << (sb->s_blocksize_bits -
1114233bde21SBart Van Assche 					      SECTOR_SHIFT),
1115233bde21SBart Van Assche 				    nr_blocks << (sb->s_blocksize_bits -
1116233bde21SBart Van Assche 						  SECTOR_SHIFT),
111744abff2cSChristoph Hellwig 				    gfp_mask);
1118fb2dce86SDavid Woodhouse }
1119e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1120a107e5a3STheodore Ts'o 		sector_t nr_blocks, gfp_t gfp_mask)
1121e6fa0be6SLukas Czerner {
1122e6fa0be6SLukas Czerner 	return blkdev_issue_zeroout(sb->s_bdev,
1123233bde21SBart Van Assche 				    block << (sb->s_blocksize_bits -
1124233bde21SBart Van Assche 					      SECTOR_SHIFT),
1125233bde21SBart Van Assche 				    nr_blocks << (sb->s_blocksize_bits -
1126233bde21SBart Van Assche 						  SECTOR_SHIFT),
1127ee472d83SChristoph Hellwig 				    gfp_mask, 0);
1128e6fa0be6SLukas Czerner }
11291da177e4SLinus Torvalds
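/*
 * Illustrative note (editor's example): the sb_issue_*() helpers above
 * convert filesystem blocks to 512-byte sectors by shifting with
 * (s_blocksize_bits - SECTOR_SHIFT).  For instance, with a 4096-byte block
 * size s_blocksize_bits is 12 and SECTOR_SHIFT is 9, so block 100 becomes
 * sector 100 << 3 == 800: each filesystem block spans eight sectors.
 */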
1130fa01b1e9SChristoph Hellwig static inline bool bdev_is_partition(struct block_device *bdev)
1131fa01b1e9SChristoph Hellwig {
1132b8c873edSAl Viro 	return bdev_partno(bdev) != 0;
1133fa01b1e9SChristoph Hellwig }
1134fa01b1e9SChristoph Hellwig
1135eb28d31bSMartin K. Petersen enum blk_default_limits {
1136eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENTS	= 128,
1137eb28d31bSMartin K. Petersen 	BLK_SAFE_MAX_SECTORS	= 255,
1138eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENT_SIZE	= 65536,
1139eb28d31bSMartin K. Petersen 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1140eb28d31bSMartin K. Petersen };
11410e435ac2SMilan Broz
1142d6b9f4e6SChristoph Hellwig /*
1143d6b9f4e6SChristoph Hellwig  * Default upper limit for the software max_sectors limit used for
1144d6b9f4e6SChristoph Hellwig  * regular file system I/O. This can be increased through sysfs.
1145d6b9f4e6SChristoph Hellwig  *
1146d6b9f4e6SChristoph Hellwig  * Not to be confused with the max_hw_sectors limit that is entirely
1147d6b9f4e6SChristoph Hellwig  * controlled by the driver, usually based on hardware limits.
1148d6b9f4e6SChristoph Hellwig  */
1149d6b9f4e6SChristoph Hellwig #define BLK_DEF_MAX_SECTORS_CAP	2560u
11500a26f327SKeith Busch
1151af2c68feSBart Van Assche static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1152ae03bf63SMartin K. Petersen {
1153025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
1154ae03bf63SMartin K. Petersen }
1155ae03bf63SMartin K. Petersen
1156af2c68feSBart Van Assche static inline unsigned long queue_virt_boundary(const struct request_queue *q)
115703100aadSKeith Busch {
115803100aadSKeith Busch 	return q->limits.virt_boundary_mask;
115903100aadSKeith Busch }
116003100aadSKeith Busch
1161af2c68feSBart Van Assche static inline unsigned int queue_max_sectors(const struct request_queue *q)
1162ae03bf63SMartin K. Petersen {
1163025146e1SMartin K. Petersen 	return q->limits.max_sectors;
1164ae03bf63SMartin K. Petersen }
1165ae03bf63SMartin K. Petersen
1166547e2f70SChristoph Hellwig static inline unsigned int queue_max_bytes(struct request_queue *q)
1167547e2f70SChristoph Hellwig {
1168547e2f70SChristoph Hellwig 	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
1169547e2f70SChristoph Hellwig }
1170547e2f70SChristoph Hellwig
1171af2c68feSBart Van Assche static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1172ae03bf63SMartin K. Petersen {
1173025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
1174ae03bf63SMartin K. Petersen }
1175ae03bf63SMartin K. Petersen
1176af2c68feSBart Van Assche static inline unsigned short queue_max_segments(const struct request_queue *q)
1177ae03bf63SMartin K. Petersen {
11788a78362cSMartin K. Petersen 	return q->limits.max_segments;
1179ae03bf63SMartin K. Petersen }
1180ae03bf63SMartin K. Petersen
1181af2c68feSBart Van Assche static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
11821e739730SChristoph Hellwig {
11831e739730SChristoph Hellwig 	return q->limits.max_discard_segments;
11841e739730SChristoph Hellwig }
11851e739730SChristoph Hellwig
1186af2c68feSBart Van Assche static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1187ae03bf63SMartin K. Petersen {
1188025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
1189ae03bf63SMartin K. Petersen }
1190ae03bf63SMartin K.
Petersen 1191379b122aSChristoph Hellwig static inline unsigned int 1192379b122aSChristoph Hellwig queue_limits_max_zone_append_sectors(const struct queue_limits *l) 11930512a75bSKeith Busch { 1194ccdbf0aaSDamien Le Moal unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors); 1195fe6f0cdcSJohannes Thumshirn 1196ccdbf0aaSDamien Le Moal return min_not_zero(l->max_zone_append_sectors, max_sectors); 1197ccdbf0aaSDamien Le Moal } 1198fe6f0cdcSJohannes Thumshirn 1199ccdbf0aaSDamien Le Moal static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q) 1200ccdbf0aaSDamien Le Moal { 1201ccdbf0aaSDamien Le Moal if (!blk_queue_is_zoned(q)) 1202ccdbf0aaSDamien Le Moal return 0; 1203ccdbf0aaSDamien Le Moal 1204ccdbf0aaSDamien Le Moal return queue_limits_max_zone_append_sectors(&q->limits); 1205ccdbf0aaSDamien Le Moal } 1206ccdbf0aaSDamien Le Moal 1207ccdbf0aaSDamien Le Moal static inline bool queue_emulates_zone_append(struct request_queue *q) 1208ccdbf0aaSDamien Le Moal { 1209ccdbf0aaSDamien Le Moal return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; 1210ccdbf0aaSDamien Le Moal } 1211ccdbf0aaSDamien Le Moal 1212ccdbf0aaSDamien Le Moal static inline bool bdev_emulates_zone_append(struct block_device *bdev) 1213ccdbf0aaSDamien Le Moal { 1214ccdbf0aaSDamien Le Moal return queue_emulates_zone_append(bdev_get_queue(bdev)); 12150512a75bSKeith Busch } 12160512a75bSKeith Busch 12172aba0d19SChristoph Hellwig static inline unsigned int 12182aba0d19SChristoph Hellwig bdev_max_zone_append_sectors(struct block_device *bdev) 12192aba0d19SChristoph Hellwig { 12202aba0d19SChristoph Hellwig return queue_max_zone_append_sectors(bdev_get_queue(bdev)); 12212aba0d19SChristoph Hellwig } 12222aba0d19SChristoph Hellwig 122365ea1b66SNaohiro Aota static inline unsigned int bdev_max_segments(struct block_device *bdev) 122465ea1b66SNaohiro Aota { 122565ea1b66SNaohiro Aota return queue_max_segments(bdev_get_queue(bdev)); 122665ea1b66SNaohiro Aota } 122765ea1b66SNaohiro Aota 1228ad6bf88aSMikulas Patocka static inline unsigned queue_logical_block_size(const struct request_queue *q) 12291da177e4SLinus Torvalds { 12305476394aSChristoph Hellwig return q->limits.logical_block_size; 12311da177e4SLinus Torvalds } 12321da177e4SLinus Torvalds 1233ad6bf88aSMikulas Patocka static inline unsigned int bdev_logical_block_size(struct block_device *bdev) 12341da177e4SLinus Torvalds { 1235e1defc4fSMartin K. Petersen return queue_logical_block_size(bdev_get_queue(bdev)); 12361da177e4SLinus Torvalds } 12371da177e4SLinus Torvalds 1238af2c68feSBart Van Assche static inline unsigned int queue_physical_block_size(const struct request_queue *q) 1239c72758f3SMartin K. Petersen { 1240c72758f3SMartin K. Petersen return q->limits.physical_block_size; 1241c72758f3SMartin K. Petersen } 1242c72758f3SMartin K. Petersen 1243892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev) 1244ac481c20SMartin K. Petersen { 1245ac481c20SMartin K. Petersen return queue_physical_block_size(bdev_get_queue(bdev)); 1246ac481c20SMartin K. Petersen } 1247ac481c20SMartin K. Petersen 1248af2c68feSBart Van Assche static inline unsigned int queue_io_min(const struct request_queue *q) 1249c72758f3SMartin K. Petersen { 1250c72758f3SMartin K. Petersen return q->limits.io_min; 1251c72758f3SMartin K. Petersen } 1252c72758f3SMartin K. Petersen 1253ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev) 1254ac481c20SMartin K. Petersen { 1255ac481c20SMartin K. 
Petersen return queue_io_min(bdev_get_queue(bdev)); 1256ac481c20SMartin K. Petersen } 1257ac481c20SMartin K. Petersen 1258af2c68feSBart Van Assche static inline unsigned int queue_io_opt(const struct request_queue *q) 1259c72758f3SMartin K. Petersen { 1260c72758f3SMartin K. Petersen return q->limits.io_opt; 1261c72758f3SMartin K. Petersen } 1262c72758f3SMartin K. Petersen 1263ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev) 1264ac481c20SMartin K. Petersen { 1265ac481c20SMartin K. Petersen return queue_io_opt(bdev_get_queue(bdev)); 1266ac481c20SMartin K. Petersen } 1267ac481c20SMartin K. Petersen 1268a805a4faSDamien Le Moal static inline unsigned int 1269a805a4faSDamien Le Moal queue_zone_write_granularity(const struct request_queue *q) 1270a805a4faSDamien Le Moal { 1271a805a4faSDamien Le Moal return q->limits.zone_write_granularity; 1272a805a4faSDamien Le Moal } 1273a805a4faSDamien Le Moal 1274a805a4faSDamien Le Moal static inline unsigned int 1275a805a4faSDamien Le Moal bdev_zone_write_granularity(struct block_device *bdev) 1276a805a4faSDamien Le Moal { 1277a805a4faSDamien Le Moal return queue_zone_write_granularity(bdev_get_queue(bdev)); 1278a805a4faSDamien Le Moal } 1279a805a4faSDamien Le Moal 128089098b07SChristoph Hellwig int bdev_alignment_offset(struct block_device *bdev); 12815c4b4a5cSChristoph Hellwig unsigned int bdev_discard_alignment(struct block_device *bdev); 1282c6e66634SPaolo Bonzini 1283cf0fbf89SChristoph Hellwig static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev) 1284cf0fbf89SChristoph Hellwig { 1285cf0fbf89SChristoph Hellwig return bdev_get_queue(bdev)->limits.max_discard_sectors; 1286cf0fbf89SChristoph Hellwig } 1287cf0fbf89SChristoph Hellwig 12887b47ef52SChristoph Hellwig static inline unsigned int bdev_discard_granularity(struct block_device *bdev) 12897b47ef52SChristoph Hellwig { 12907b47ef52SChristoph Hellwig return bdev_get_queue(bdev)->limits.discard_granularity; 12917b47ef52SChristoph Hellwig } 12927b47ef52SChristoph Hellwig 129344abff2cSChristoph Hellwig static inline unsigned int 129444abff2cSChristoph Hellwig bdev_max_secure_erase_sectors(struct block_device *bdev) 129544abff2cSChristoph Hellwig { 129644abff2cSChristoph Hellwig return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; 129744abff2cSChristoph Hellwig } 129844abff2cSChristoph Hellwig 1299a6f0788eSChaitanya Kulkarni static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) 1300a6f0788eSChaitanya Kulkarni { 130181475bebSJohn Garry return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; 1302a6f0788eSChaitanya Kulkarni } 1303a6f0788eSChaitanya Kulkarni 130410f0d2a5SChristoph Hellwig static inline bool bdev_nonrot(struct block_device *bdev) 130510f0d2a5SChristoph Hellwig { 130610f0d2a5SChristoph Hellwig return blk_queue_nonrot(bdev_get_queue(bdev)); 130710f0d2a5SChristoph Hellwig } 130810f0d2a5SChristoph Hellwig 13093222d8c2SChristoph Hellwig static inline bool bdev_synchronous(struct block_device *bdev) 13103222d8c2SChristoph Hellwig { 1311aadd5c59SChristoph Hellwig return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS; 13123222d8c2SChristoph Hellwig } 13133222d8c2SChristoph Hellwig 131436d25489SChristoph Hellwig static inline bool bdev_stable_writes(struct block_device *bdev) 131536d25489SChristoph Hellwig { 13163c3e85ddSChristoph Hellwig struct request_queue *q = bdev_get_queue(bdev); 13173c3e85ddSChristoph Hellwig 1318c6e56cf6SChristoph Hellwig if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 
1319c6e56cf6SChristoph Hellwig 	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
13203c3e85ddSChristoph Hellwig 		return true;
13211a02f3a7SChristoph Hellwig 	return q->limits.features & BLK_FEAT_STABLE_WRITES;
132236d25489SChristoph Hellwig }
132336d25489SChristoph Hellwig
13241122c0c1SChristoph Hellwig static inline bool blk_queue_write_cache(struct request_queue *q)
13251122c0c1SChristoph Hellwig {
13261122c0c1SChristoph Hellwig 	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
1327bae1c743SChristoph Hellwig 		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
132808e688fdSChristoph Hellwig }
1329a557e82eSChristoph Hellwig
1330a557e82eSChristoph Hellwig static inline bool bdev_write_cache(struct block_device *bdev)
1331a557e82eSChristoph Hellwig {
13321122c0c1SChristoph Hellwig 	return blk_queue_write_cache(bdev_get_queue(bdev));
1333797476b8SDamien Le Moal }
1334797476b8SDamien Le Moal
1335a557e82eSChristoph Hellwig static inline bool bdev_fua(struct block_device *bdev)
1336a557e82eSChristoph Hellwig {
13371122c0c1SChristoph Hellwig 	return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
1338a557e82eSChristoph Hellwig }
1339a557e82eSChristoph Hellwig
1340568ec936SChristoph Hellwig static inline bool bdev_nowait(struct block_device *bdev)
1341568ec936SChristoph Hellwig {
1342f76af42fSChristoph Hellwig 	return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
1343568ec936SChristoph Hellwig }
1344568ec936SChristoph Hellwig
1345797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev)
1346797476b8SDamien Le Moal {
1347fea127b3SPankaj Raghav 	return blk_queue_is_zoned(bdev_get_queue(bdev));
1348797476b8SDamien Le Moal }
1349797476b8SDamien Le Moal
1350d67ea690SPankaj Raghav static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
1351d67ea690SPankaj Raghav {
1352d67ea690SPankaj Raghav 	return disk_zone_no(bdev->bd_disk, sec);
1353d67ea690SPankaj Raghav }
1354d67ea690SPankaj Raghav
1355113ab72eSDamien Le Moal static inline sector_t bdev_zone_sectors(struct block_device *bdev)
13566a0cb1bcSHannes Reinecke {
13576a0cb1bcSHannes Reinecke 	struct request_queue *q = bdev_get_queue(bdev);
13586a0cb1bcSHannes Reinecke
1359de71973cSChristoph Hellwig 	if (!blk_queue_is_zoned(q))
13606cc77e9cSChristoph Hellwig 		return 0;
1361de71973cSChristoph Hellwig 	return q->limits.chunk_sectors;
13626cc77e9cSChristoph Hellwig }
13636a0cb1bcSHannes Reinecke
1364e29b2100SPankaj Raghav static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
1365e29b2100SPankaj Raghav 						   sector_t sector)
1366e29b2100SPankaj Raghav {
	/* masking is valid because the block layer requires power-of-2 zone sizes */
1367e29b2100SPankaj Raghav 	return sector & (bdev_zone_sectors(bdev) - 1);
1368e29b2100SPankaj Raghav }
1369e29b2100SPankaj Raghav
1370b85a3c1bSDamien Le Moal static inline sector_t bio_offset_from_zone_start(struct bio *bio)
1371b85a3c1bSDamien Le Moal {
1372b85a3c1bSDamien Le Moal 	return bdev_offset_from_zone_start(bio->bi_bdev,
1373b85a3c1bSDamien Le Moal 					   bio->bi_iter.bi_sector);
1374b85a3c1bSDamien Le Moal }
1375b85a3c1bSDamien Le Moal
1376e29b2100SPankaj Raghav static inline bool bdev_is_zone_start(struct block_device *bdev,
1377e29b2100SPankaj Raghav 				      sector_t sector)
1378e29b2100SPankaj Raghav {
1379e29b2100SPankaj Raghav 	return bdev_offset_from_zone_start(bdev, sector) == 0;
1380e29b2100SPankaj Raghav }
1381e29b2100SPankaj Raghav
1382af2c68feSBart Van Assche static inline int queue_dma_alignment(const struct request_queue *q)
13831da177e4SLinus Torvalds {
1384abfc9d81SChristoph Hellwig 	return q->limits.dma_alignment;
13851da177e4SLinus Torvalds } 13861da177e4SLinus Torvalds 13879da3d1e9SJohn Garry static inline unsigned int 13889da3d1e9SJohn Garry queue_atomic_write_unit_max_bytes(const struct request_queue *q) 13899da3d1e9SJohn Garry { 13909da3d1e9SJohn Garry return q->limits.atomic_write_unit_max; 13919da3d1e9SJohn Garry } 13929da3d1e9SJohn Garry 13939da3d1e9SJohn Garry static inline unsigned int 13949da3d1e9SJohn Garry queue_atomic_write_unit_min_bytes(const struct request_queue *q) 13959da3d1e9SJohn Garry { 13969da3d1e9SJohn Garry return q->limits.atomic_write_unit_min; 13979da3d1e9SJohn Garry } 13989da3d1e9SJohn Garry 13999da3d1e9SJohn Garry static inline unsigned int 14009da3d1e9SJohn Garry queue_atomic_write_boundary_bytes(const struct request_queue *q) 14019da3d1e9SJohn Garry { 14029da3d1e9SJohn Garry return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT; 14039da3d1e9SJohn Garry } 14049da3d1e9SJohn Garry 14059da3d1e9SJohn Garry static inline unsigned int 14069da3d1e9SJohn Garry queue_atomic_write_max_bytes(const struct request_queue *q) 14079da3d1e9SJohn Garry { 14089da3d1e9SJohn Garry return q->limits.atomic_write_max_sectors << SECTOR_SHIFT; 14091da177e4SLinus Torvalds } 14101da177e4SLinus Torvalds 14114a2dcc35SKeith Busch static inline unsigned int bdev_dma_alignment(struct block_device *bdev) 14124a2dcc35SKeith Busch { 14134a2dcc35SKeith Busch return queue_dma_alignment(bdev_get_queue(bdev)); 14144a2dcc35SKeith Busch } 14154a2dcc35SKeith Busch 14165debd969SKeith Busch static inline bool bdev_iter_is_aligned(struct block_device *bdev, 14175debd969SKeith Busch struct iov_iter *iter) 14185debd969SKeith Busch { 14195debd969SKeith Busch return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev), 14205debd969SKeith Busch bdev_logical_block_size(bdev) - 1); 14215debd969SKeith Busch } 14225debd969SKeith Busch 1423e94b45d0SChristoph Hellwig static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim) 1424e94b45d0SChristoph Hellwig { 1425e94b45d0SChristoph Hellwig return lim->dma_alignment | lim->dma_pad_mask; 1426e94b45d0SChristoph Hellwig } 1427e94b45d0SChristoph Hellwig 142814417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, 142987904074SFUJITA Tomonori unsigned int len) 143087904074SFUJITA Tomonori { 1431e94b45d0SChristoph Hellwig unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); 1432e94b45d0SChristoph Hellwig 143314417799SNamhyung Kim return !(addr & alignment) && !(len & alignment); 143487904074SFUJITA Tomonori } 143587904074SFUJITA Tomonori 14361da177e4SLinus Torvalds /* assumes size > 256 */ 14371da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size) 14381da177e4SLinus Torvalds { 1439adff2158SDawei Li return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT; 14401da177e4SLinus Torvalds } 14411da177e4SLinus Torvalds 144259c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work); 1443818cd1cbSJens Axboe int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 14441da177e4SLinus Torvalds 14451da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 14461da177e4SLinus Torvalds MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 14471da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 14481da177e4SLinus Torvalds MODULE_ALIAS("block-major-" __stringify(major) "-*") 14491da177e4SLinus Torvalds 1450d145dc23SSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1451d145dc23SSatya Tangirala 
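/*
 * Illustrative example (editor's sketch; mydev->crypto_profile is a
 * hypothetical member): a driver with inline encryption hardware sets up
 * a blk_crypto_profile and registers it with its request_queue, typically
 * during probe:
 *
 *	if (!blk_crypto_register(&mydev->crypto_profile, q))
 *		dev_warn(dev, "failed to register inline encryption\n");
 *
 * See include/linux/blk-crypto-profile.h for how the profile itself is
 * initialized.
 */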
1452cb77cb5aSEric Biggers bool blk_crypto_register(struct blk_crypto_profile *profile,
1453cb77cb5aSEric Biggers 			 struct request_queue *q);
1454d145dc23SSatya Tangirala
1455d145dc23SSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1456d145dc23SSatya Tangirala
1457cb77cb5aSEric Biggers static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
1458d145dc23SSatya Tangirala 				       struct request_queue *q)
1459d145dc23SSatya Tangirala {
1460d145dc23SSatya Tangirala 	return true;
1461d145dc23SSatya Tangirala }
1462d145dc23SSatya Tangirala
1463d145dc23SSatya Tangirala #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1464d145dc23SSatya Tangirala
14659208d414SChristoph Hellwig enum blk_unique_id {
14669208d414SChristoph Hellwig 	/* these match the Designator Types specified in SPC */
14679208d414SChristoph Hellwig 	BLK_UID_T10	= 1,
14689208d414SChristoph Hellwig 	BLK_UID_EUI64	= 2,
14699208d414SChristoph Hellwig 	BLK_UID_NAA	= 3,
14709208d414SChristoph Hellwig };
14719208d414SChristoph Hellwig
147208f85851SAl Viro struct block_device_operations {
14733e08773cSChristoph Hellwig 	void (*submit_bio)(struct bio *bio);
147469fe0f29SMing Lei 	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
147569fe0f29SMing Lei 			unsigned int flags);
147605bdb996SChristoph Hellwig 	int (*open)(struct gendisk *disk, blk_mode_t mode);
1477ae220766SChristoph Hellwig 	void (*release)(struct gendisk *disk);
147805bdb996SChristoph Hellwig 	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
147905bdb996SChristoph Hellwig 			unsigned cmd, unsigned long arg);
148005bdb996SChristoph Hellwig 	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
148105bdb996SChristoph Hellwig 			unsigned cmd, unsigned long arg);
148277ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
148377ea887eSTejun Heo 				      unsigned int clearing);
1484c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
148508f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1486e00adcadSChristoph Hellwig 	int (*set_read_only)(struct block_device *bdev, bool ro);
148776792055SChristoph Hellwig 	void (*free_disk)(struct gendisk *disk);
1488b3a27d05SNitin Gupta 	/* this callback is called with swap_lock and sometimes page table lock held */
1489b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1490e76239a3SChristoph Hellwig 	int (*report_zones)(struct gendisk *, sector_t sector,
1491d4100351SChristoph Hellwig 			unsigned int nr_zones, report_zones_cb cb, void *data);
1492050a4f34SJens Axboe 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
14939208d414SChristoph Hellwig 	/* returns the length of the identifier or a negative errno: */
14949208d414SChristoph Hellwig 	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
14959208d414SChristoph Hellwig 			enum blk_unique_id id_type);
149608f85851SAl Viro 	struct module *owner;
1497bbd3e064SChristoph Hellwig 	const struct pr_ops *pr_ops;
14980bdfbca8SDmitry Osipenko
14990bdfbca8SDmitry Osipenko 	/*
15000bdfbca8SDmitry Osipenko 	 * Special callback for probing GPT entry at a given sector.
15010bdfbca8SDmitry Osipenko 	 * Needed by Android devices, used by GPT scanner and MMC blk
15020bdfbca8SDmitry Osipenko 	 * driver.
15030bdfbca8SDmitry Osipenko 	 */
15040bdfbca8SDmitry Osipenko 	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
150508f85851SAl Viro };
150608f85851SAl Viro
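/*
 * Illustrative example (editor's sketch; the example_* names are
 * hypothetical): a minimal bio-based driver only fills in the callbacks it
 * actually implements, leaving the rest NULL:
 *
 *	static void example_submit_bio(struct bio *bio)
 *	{
 *		// handle or remap the bio, then complete it
 *		bio_endio(bio);
 *	}
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= example_submit_bio,
 *	};
 *
 * The table is wired up through disk->fops before the gendisk is
 * registered with add_disk().
 */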
1507ee6a129dSArnd Bergmann #ifdef CONFIG_COMPAT
150805bdb996SChristoph Hellwig extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
1509ee6a129dSArnd Bergmann 				   unsigned int, unsigned long);
1510ee6a129dSArnd Bergmann #else
1511ee6a129dSArnd Bergmann #define blkdev_compat_ptr_ioctl NULL
1512ee6a129dSArnd Bergmann #endif
1513ee6a129dSArnd Bergmann
15140619317fSJens Axboe static inline void blk_wake_io_task(struct task_struct *waiter)
15150619317fSJens Axboe {
15160619317fSJens Axboe 	/*
15170619317fSJens Axboe 	 * If we're polling, the task itself is doing the completions. For
15180619317fSJens Axboe 	 * that case, we don't need to signal a wakeup, it's enough to just
15190619317fSJens Axboe 	 * mark us as RUNNING.
15200619317fSJens Axboe 	 */
15210619317fSJens Axboe 	if (waiter == current)
15220619317fSJens Axboe 		__set_current_state(TASK_RUNNING);
15230619317fSJens Axboe 	else
15240619317fSJens Axboe 		wake_up_process(waiter);
15250619317fSJens Axboe }
15260619317fSJens Axboe
15275f275713SYu Kuai unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
15285f0614a5SMing Lei 				 unsigned long start_time);
152977e7ffd7SBart Van Assche void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
15305f275713SYu Kuai 		      unsigned int sectors, unsigned long start_time);
1531956d510eSChristoph Hellwig
153299dfc43eSChristoph Hellwig unsigned long bio_start_io_acct(struct bio *bio);
153399dfc43eSChristoph Hellwig void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
153499dfc43eSChristoph Hellwig 		struct block_device *orig_bdev);
1535956d510eSChristoph Hellwig
1536956d510eSChristoph Hellwig /**
1537956d510eSChristoph Hellwig  * bio_end_io_acct - end I/O accounting for bio-based drivers
1538956d510eSChristoph Hellwig  * @bio: bio to end accounting for
1539b42c1fc3SChristoph Hellwig  * @start_time: start time returned by bio_start_io_acct()
1540956d510eSChristoph Hellwig  */
1541956d510eSChristoph Hellwig static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
1542956d510eSChristoph Hellwig {
154399dfc43eSChristoph Hellwig 	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
1544956d510eSChristoph Hellwig }
1545956d510eSChristoph Hellwig
15463f1266f1SChristoph Hellwig int bdev_read_only(struct block_device *bdev);
1547ead083aeSAl Viro int set_blocksize(struct file *file, int size);
15483f1266f1SChristoph Hellwig
15494e7b5671SChristoph Hellwig int lookup_bdev(const char *pathname, dev_t *dev);
15503f1266f1SChristoph Hellwig
15513f1266f1SChristoph Hellwig void blkdev_show(struct seq_file *seqf, off_t offset);
15523f1266f1SChristoph Hellwig
15533f1266f1SChristoph Hellwig #define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
15543f1266f1SChristoph Hellwig #define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
15553f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
15563f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	512
15573f1266f1SChristoph Hellwig #else
15583f1266f1SChristoph Hellwig #define BLKDEV_MAJOR_MAX	0
15591da177e4SLinus Torvalds #endif
15603f1266f1SChristoph Hellwig
15610718afd4SChristoph Hellwig struct blk_holder_ops {
1562d8530de5SChristoph Hellwig 	void (*mark_dead)(struct block_device *bdev, bool surprise);
15632142b88cSChristoph Hellwig
15642142b88cSChristoph Hellwig 	/*
15652142b88cSChristoph Hellwig 	 * Sync the file system mounted on the block device.
15662142b88cSChristoph Hellwig 	 */
15672142b88cSChristoph Hellwig 	void (*sync)(struct block_device *bdev);
1568a30561a9SChristian Brauner
1569a30561a9SChristian Brauner 	/*
1570a30561a9SChristian Brauner 	 * Freeze the file system mounted on the block device.
1571a30561a9SChristian Brauner 	 */
1572a30561a9SChristian Brauner 	int (*freeze)(struct block_device *bdev);
1573a30561a9SChristian Brauner
1574a30561a9SChristian Brauner 	/*
1575a30561a9SChristian Brauner 	 * Thaw the file system mounted on the block device.
1576a30561a9SChristian Brauner 	 */
1577a30561a9SChristian Brauner 	int (*thaw)(struct block_device *bdev);
15780718afd4SChristoph Hellwig };
15790718afd4SChristoph Hellwig
1580e419cf3eSChristian Brauner /*
1581e419cf3eSChristian Brauner  * For filesystems using @fs_holder_ops, the @holder argument passed to
1582e419cf3eSChristian Brauner  * helpers used to open and claim block devices via
1583e419cf3eSChristian Brauner  * bd_prepare_to_claim() must point to a superblock.
1584e419cf3eSChristian Brauner  */
15857ecd0b6fSChristoph Hellwig extern const struct blk_holder_ops fs_holder_ops;
15867ecd0b6fSChristoph Hellwig
15873f0b3e78SChristoph Hellwig /*
15883f0b3e78SChristoph Hellwig  * Return the correct open flags for bdev_file_open_by_* for super block
15893f0b3e78SChristoph Hellwig  * flags as stored in sb->s_flags.
15903f0b3e78SChristoph Hellwig  */
15913f0b3e78SChristoph Hellwig #define sb_open_mode(flags) \
15926f861765SJan Kara 	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
15936f861765SJan Kara 	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
15943f0b3e78SChristoph Hellwig
1595f3a60882SChristian Brauner struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
1596f3a60882SChristian Brauner 		const struct blk_holder_ops *hops);
1597f3a60882SChristian Brauner struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
1598f3a60882SChristian Brauner 		void *holder, const struct blk_holder_ops *hops);
15990718afd4SChristoph Hellwig int bd_prepare_to_claim(struct block_device *bdev, void *holder,
16000718afd4SChristoph Hellwig 		const struct blk_holder_ops *hops);
160137c3fc9aSChristoph Hellwig void bd_abort_claiming(struct block_device *bdev, void *holder);
16023f1266f1SChristoph Hellwig
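/*
 * Illustrative example (editor's sketch; my_ctx and "/dev/vda" are
 * placeholders): opening a block device by path for claimed read/write
 * access and resolving the struct block_device behind the file:
 *
 *	struct file *bdev_file;
 *	struct block_device *bdev;
 *
 *	bdev_file = bdev_file_open_by_path("/dev/vda",
 *			BLK_OPEN_READ | BLK_OPEN_WRITE, my_ctx, NULL);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	bdev = file_bdev(bdev_file);
 *	// ... use the device, then drop the reference:
 *	fput(bdev_file);
 *
 * Passing NULL blk_holder_ops is allowed; filesystems normally pass
 * &fs_holder_ops with the super_block as the @holder, as described above.
 */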
160322ae8ce8SChristoph Hellwig /* just for blk-cgroup, don't use elsewhere */
160422ae8ce8SChristoph Hellwig struct block_device *blkdev_get_no_open(dev_t dev);
160522ae8ce8SChristoph Hellwig void blkdev_put_no_open(struct block_device *bdev);
160622ae8ce8SChristoph Hellwig
1607621c1f42SChristoph Hellwig struct block_device *I_BDEV(struct inode *inode);
1608f3a60882SChristian Brauner struct block_device *file_bdev(struct file *bdev_file);
1609186ddac2SYu Kuai bool disk_live(struct gendisk *disk);
1610186ddac2SYu Kuai unsigned int block_size(struct block_device *bdev);
16113f1266f1SChristoph Hellwig
16123f1266f1SChristoph Hellwig #ifdef CONFIG_BLOCK
16133f1266f1SChristoph Hellwig void invalidate_bdev(struct block_device *bdev);
16143f1266f1SChristoph Hellwig int sync_blockdev(struct block_device *bdev);
161597d6fb1bSYuezhang Mo int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
161670164eb6SChristoph Hellwig int sync_blockdev_nowait(struct block_device *bdev);
16171e03a36bSChristoph Hellwig void sync_bdevs(bool wait);
16183e781988SLinus Torvalds void bdev_statx(struct path *, struct kstat *, u32);
1619322cbb50SChristoph Hellwig void printk_all_partitions(void);
16202577f53fSChristoph Hellwig int __init early_lookup_bdev(const char *pathname, dev_t *dev);
16213f1266f1SChristoph Hellwig #else
16223f1266f1SChristoph Hellwig static inline void invalidate_bdev(struct block_device *bdev)
16233f1266f1SChristoph Hellwig {
16243f1266f1SChristoph Hellwig }
16253f1266f1SChristoph Hellwig static inline int sync_blockdev(struct block_device *bdev)
16263f1266f1SChristoph Hellwig {
16273f1266f1SChristoph Hellwig 	return 0;
16283f1266f1SChristoph Hellwig }
162970164eb6SChristoph Hellwig static inline int sync_blockdev_nowait(struct block_device *bdev)
163070164eb6SChristoph Hellwig {
163170164eb6SChristoph Hellwig 	return 0;
163270164eb6SChristoph Hellwig }
16331e03a36bSChristoph Hellwig static inline void sync_bdevs(bool wait)
16341e03a36bSChristoph Hellwig {
16351e03a36bSChristoph Hellwig }
16363e781988SLinus Torvalds static inline void bdev_statx(struct path *path, struct kstat *stat,
16379abcfbd2SPrasad Singamsetty 		u32 request_mask)
16382d985f8cSEric Biggers {
16392d985f8cSEric Biggers }
1640322cbb50SChristoph Hellwig static inline void printk_all_partitions(void)
1641322cbb50SChristoph Hellwig {
1642322cbb50SChristoph Hellwig }
1643cf056a43SChristoph Hellwig static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
1644cf056a43SChristoph Hellwig {
1645cf056a43SChristoph Hellwig 	return -EINVAL;
1646cf056a43SChristoph Hellwig }
1647322cbb50SChristoph Hellwig #endif /* CONFIG_BLOCK */
1648322cbb50SChristoph Hellwig
1649982c3b30SChristian Brauner int bdev_freeze(struct block_device *bdev);
1650982c3b30SChristian Brauner int bdev_thaw(struct block_device *bdev);
165122650a99SChristian Brauner void bdev_fput(struct file *bdev_file);
16523f1266f1SChristoph Hellwig
16535a72e899SJens Axboe struct io_comp_batch {
16545a72e899SJens Axboe 	struct request *req_list;
16555a72e899SJens Axboe 	bool need_ts;
16565a72e899SJens Axboe 	void (*complete)(struct io_comp_batch *);
16575a72e899SJens Axboe };
16585a72e899SJens Axboe
16599da3d1e9SJohn Garry static inline bool bdev_can_atomic_write(struct block_device *bdev)
16609da3d1e9SJohn Garry {
16619da3d1e9SJohn Garry 	struct request_queue *bd_queue = bdev->bd_queue;
16629da3d1e9SJohn Garry 	struct queue_limits *limits = &bd_queue->limits;
16639da3d1e9SJohn Garry
16649da3d1e9SJohn Garry 	if (!limits->atomic_write_unit_min)
16659da3d1e9SJohn Garry 		return false;
16669da3d1e9SJohn Garry
16679da3d1e9SJohn Garry 	if (bdev_is_partition(bdev)) {
16689da3d1e9SJohn Garry 		sector_t bd_start_sect = bdev->bd_start_sect;
16699da3d1e9SJohn Garry 		unsigned int alignment =
16709da3d1e9SJohn Garry 			max(limits->atomic_write_unit_min,
16719da3d1e9SJohn Garry 			    limits->atomic_write_hw_boundary);
16729da3d1e9SJohn Garry
		/* a misaligned partition start would break the unit and boundary guarantees */
16739da3d1e9SJohn Garry 		if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
16749da3d1e9SJohn Garry 			return false;
16759da3d1e9SJohn Garry 	}
16769da3d1e9SJohn Garry
16779da3d1e9SJohn Garry 	return true;
16789da3d1e9SJohn Garry }
16799da3d1e9SJohn Garry
16805a72e899SJens Axboe #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
16815a72e899SJens Axboe
16823f1266f1SChristoph Hellwig #endif /* _LINUX_BLKDEV_H */
1683