/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	5
typedef void (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
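/*
 * Illustrative sketch (not part of this header): drivers manipulate
 * rq_flags with plain bit operations. A driver that has finished its
 * one-time setup for a request typically marks it so a requeue does not
 * prepare it again (my_driver_prep_rq() is a made-up helper):
 *
 *	if (!(rq->rq_flags & RQF_DONTPREP)) {
 *		my_driver_prep_rq(rq);
 *		rq->rq_flags |= RQF_DONTPREP;
 *	}
 */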
/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct list_head queuelist;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count; /* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	unsigned int extra_len;	/* length of alignment and padding */

	enum mq_rq_state state;
	refcount_t ref;

	unsigned int timeout;
	unsigned long deadline;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};
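/*
 * Illustrative sketch (not part of this header): a completion callback
 * matching rq_end_io_fn, as would be passed to blk_execute_rq_nowait()
 * (declared further below). my_ctx and my_end_io are made-up names:
 *
 *	static void my_end_io(struct request *rq, blk_status_t status)
 *	{
 *		struct my_ctx *ctx = rq->end_io_data;
 *
 *		ctx->status = status;
 *		complete(&ctx->done);
 *	}
 */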
static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);

struct bio_vec;
typedef int (dma_drain_needed_fn)(struct request *);

enum blk_eh_timer_return {
	BLK_EH_DONE,		/* driver has completed the command */
	BLK_EH_RESET_TIMER,	/* reset timer and try again */
};
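/*
 * Illustrative sketch (not part of this header): these values are returned
 * by a blk-mq ->timeout() callback (see struct blk_mq_ops in blk-mq.h).
 * my_timeout() and my_device_is_busy() are made-up names:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (my_device_is_busy(rq))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_DONE;	(driver completes/aborts rq itself)
 *	}
 */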
enum blk_queue_state {
	Queue_down,
	Queue_up,
};

#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};
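/*
 * Illustrative sketch (not part of this header): stacking drivers and file
 * systems usually read these limits straight from q->limits, e.g.:
 *
 *	unsigned int lbs = q->limits.logical_block_size;
 *	unsigned int max_bytes = q->limits.max_sectors << SECTOR_SHIFT;
 *
 * Drivers should set the limits with the blk_queue_*() helpers declared
 * near the end of this header rather than writing the fields directly.
 */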
#ifdef CONFIG_BLK_DEV_ZONED

/*
 * Maximum number of zones to report with a single report zones command.
 */
#define BLK_ZONED_REPORT_MAX_ZONES	8192U

extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blk_revalidate_disk_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
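/*
 * Illustrative sketch (not part of this header): fetching the first few
 * zones of a zoned device. On success, *nr_zones is updated to the number
 * of zones actually reported:
 *
 *	struct blk_zone zones[8];
 *	unsigned int nr_zones = 8;
 *	int ret;
 *
 *	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
 */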
struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	make_request_fn		*make_request_fn;
	dma_drain_needed_fn	*dma_drain_needed;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	struct backing_dev_info	*backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
	 * processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	spinlock_t		queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;
#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit clear) or
	 * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned long		*seq_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
	struct mutex		blk_trace_mutex;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;
	struct percpu_ref	q_usage_counter;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

	size_t			cmd_size;

	struct work_struct	release_work;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];
};

#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS	21	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 23	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}
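/*
 * Illustrative sketch (not part of this header): an SSD-style driver flips
 * queue flags with the helpers above and tests them with the blk_queue_*()
 * predicates:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	if (blk_queue_discard(q))
 *		... build a REQ_OP_DISCARD bio ...
 */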
#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
		return false;
	return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
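/*
 * Worked example for blk_queue_zone_no() above: on a zoned device,
 * chunk_sectors holds the zone size, which is a power of two. With 256 MiB
 * zones (524288 sectors, ilog2 = 19), sector 1310720 lands in zone
 * 1310720 >> 19 = 2.
 */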
static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
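/*
 * Illustrative sketch (not part of this header): summing the data bytes of
 * a request segment by segment with the iterators above:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		bytes += bvec.bv_len;
 */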
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
				       blk_mq_req_flags_t flags);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
					      struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern void blk_execute_rq(struct request_queue *, struct gendisk *,
			   struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}
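/*
 * Illustrative sketch (not part of this header): allocating and
 * synchronously executing a driver-private passthrough request with the
 * functions declared above:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */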
/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}
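/*
 * Illustrative sketch (not part of this header): converting between the
 * 512-byte sector units of blk_rq_pos() and byte units:
 *
 *	sector_t sector = blk_rq_pos(rq);		(512B units)
 *	u64 byte_offset = (u64)sector << SECTOR_SHIFT;
 *	unsigned int len = blk_rq_bytes(rq);		(bytes)
 */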
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller must check that
 * the request has at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return the maximum size of a request at the given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1))));
}
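/*
 * Worked example for blk_max_size_offset() above: with
 * limits.chunk_sectors = 1024 and limits.max_sectors = 256, a request at
 * offset 1000 may only be 1024 - (1000 & 1023) = 24 sectors long, so that
 * it does not straddle a chunk boundary.
 */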
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
extern bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);

extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
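/*
 * Illustrative sketch (not part of this header): partially completing a
 * request. blk_update_request() returns true while unfinished bytes remain;
 * once it returns false the driver ends the request through its normal
 * completion path (e.g. __blk_mq_end_request() from blk-mq.h):
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, 4096))
 *		__blk_mq_end_request(rq, BLK_STS_OK);
 */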
/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, and the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return 1;
        return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
        return max_t(unsigned short, rq->nr_phys_segments, 1);
}
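/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * walking the ranges of a multi-range discard request.  Each bio in the
 * request describes one range to be filled into the device-specific
 * payload.
 *
 *      struct bio *bio;
 *      unsigned short nr = blk_rq_nr_discard_segments(rq);
 *
 *      __rq_for_each_bio(bio, rq) {
 *              u64 start = bio->bi_iter.bi_sector;     // start of this range
 *              u32 len   = bio->bi_iter.bi_size >> 9;  // length in sectors
 *              // append (start, len) to the payload, nr entries in total
 *      }
 */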
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself.  For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
        struct list_head mq_list; /* blk-mq requests */
        struct list_head cb_list; /* md requires an unplug callback */
        unsigned short rq_count;
        bool multiple_queues;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
        struct list_head list;
        blk_plug_cb_fn callback;
        void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
                                             void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        return plug &&
                 (!list_empty(&plug->mq_list) ||
                 !list_empty(&plug->cb_list));
}
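/*
 * Illustrative sketch (hypothetical submitter code, not part of this
 * header): batching a series of bio submissions under one plug so that
 * adjacent requests can be merged before they reach the device.
 *
 *      struct blk_plug plug;
 *      int i;
 *
 *      blk_start_plug(&plug);
 *      for (i = 0; i < nr_bios; i++)
 *              submit_bio(bios[i]);    // queued on the per-task plug list
 *      blk_finish_plug(&plug);         // flushes the batch to the queue
 */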
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE   (1 << 0)        /* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP     (1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK  (1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev,
                                    block << (sb->s_blocksize_bits -
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
                                    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits -
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
                                    gfp_mask, 0);
}
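/*
 * Worked example (illustrative, not part of this header): for a file
 * system with 4 KiB blocks, s_blocksize_bits is 12 and SECTOR_SHIFT is 9,
 * so sb_issue_discard() converts block numbers to 512-byte sectors with a
 * shift of 12 - 9 = 3:
 *
 *      sb_issue_discard(sb, 100, 8, GFP_NOFS, 0);
 *      // discards sectors 800..863, i.e. blocks 100..107
 */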
extern int blk_verify_command(unsigned char *cmd, fmode_t mode);

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
        BLK_DEF_MAX_SECTORS     = 2560,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
        return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
        return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
        return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(const struct request_queue *q)
{
        int retval = 512;

        if (q && q->limits.logical_block_size)
                retval = q->limits.logical_block_size;

        return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
        return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
        return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(const struct request_queue *q)
{
        if (q->limits.misaligned)
                return -1;

        return q->limits.alignment_offset;
}
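/*
 * Illustrative sketch (hypothetical file-system code, not part of this
 * header): a caller sizing its I/O to the granularities the device
 * reports through the accessors above.
 *
 *      unsigned int lbs = bdev_logical_block_size(bdev);  // e.g. 512
 *      unsigned int pbs = bdev_physical_block_size(bdev); // e.g. 4096
 *      int opt = bdev_io_opt(bdev);                       // e.g. RAID stripe
 *
 *      // Submit I/O in multiples of pbs (and of opt when it is non-zero)
 *      // to avoid read-modify-write cycles on 512e drives.
 */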
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.misaligned)
                return -1;

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->alignment_offset;

        return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(const struct request_queue *q)
{
        if (q->limits.discard_misaligned)
                return -1;

        return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;
        if (!granularity)
                return 0;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->discard_alignment;

        return q->limits.discard_alignment;
}
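/*
 * Worked example (illustrative, not part of this header): with a 1 MiB
 * discard_granularity (2048 sectors), zero discard_alignment, and a
 * partition starting at sector 34, queue_limit_discard_alignment()
 * computes:
 *
 *      offset = 34 % 2048               ->  34
 *      offset = (2048 + 0 - 34) % 2048  ->  2014
 *      return 2014 << SECTOR_SHIFT      ->  1031168 bytes
 *
 * i.e. the first naturally aligned discard boundary lies 2014 sectors
 * past the start of the partition.
 */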
static inline unsigned int bdev_write_same(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return q->limits.max_write_same_sectors;

        return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return q->limits.max_write_zeroes_sectors;

        return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return blk_queue_zoned_model(q);

        return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return blk_queue_is_zoned(q);

        return false;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return blk_queue_zone_sectors(q);
        return 0;
}
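/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * aligning writes to zone boundaries on a zoned block device.  The block
 * layer requires a power-of-two zone size, so masking works here.
 *
 *      if (bdev_is_zoned(bdev)) {
 *              sector_t zone_sectors = bdev_zone_sectors(bdev);
 *
 *              // the zone containing 'sector' starts at:
 *              sector_t zone_start = sector & ~(zone_sectors - 1);
 *      }
 */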
static inline int queue_dma_alignment(const struct request_queue *q)
{
        return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
{
        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
        unsigned int bits = 8;
        do {
                bits++;
                size >>= 1;
        } while (size > 256);
        return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
        return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
        put_page(p.v);
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
        MODULE_ALIAS("block-major-" __stringify(major) "-*")
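/*
 * Worked example (illustrative, not part of this header): blksize_bits()
 * returns log2 of the block size, starting from the 512-byte floor:
 *
 *      blksize_bits(512)   ->  9
 *      blksize_bits(1024)  -> 10
 *      blksize_bits(4096)  -> 12
 *
 * e.g. for 4096: bits starts at 8 and the loop runs four times
 * (4096 -> 2048 -> 1024 -> 512 -> 256), leaving bits = 12.
 */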
#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
        BLK_INTEGRITY_VERIFY            = 1 << 0,
        BLK_INTEGRITY_GENERATE          = 1 << 1,
        BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
        BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
};

struct blk_integrity_iter {
        void                    *prot_buf;
        void                    *data_buf;
        sector_t                seed;
        unsigned int            data_size;
        unsigned short          interval;
        const char              *disk_name;
};

typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
typedef void (integrity_prepare_fn) (struct request *);
typedef void (integrity_complete_fn) (struct request *, unsigned int);

struct blk_integrity_profile {
        integrity_processing_fn         *generate_fn;
        integrity_processing_fn         *verify_fn;
        integrity_prepare_fn            *prepare_fn;
        integrity_complete_fn           *complete_fn;
        const char                      *name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
                                   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
                                   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                                    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        struct blk_integrity *bi = &disk->queue->integrity;

        if (!bi->profile)
                return NULL;

        return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
        return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
        return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
        q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
        return q->limits.max_integrity_segments;
}
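/*
 * Illustrative sketch (hypothetical driver code, not part of this header;
 * my_prot_generate/my_prot_verify are made-up callbacks, and the
 * blk_integrity field names assume the definition in genhd.h):
 * registering an integrity profile with one 8-byte tuple per 512-byte
 * interval.
 *
 *      static const struct blk_integrity_profile my_profile = {
 *              .name           = "MYDRV-T10-DIF",
 *              .generate_fn    = my_prot_generate,
 *              .verify_fn      = my_prot_verify,
 *      };
 *
 *      struct blk_integrity bi = {
 *              .profile        = &my_profile,
 *              .tuple_size     = 8,
 *              .interval_exp   = 9,    // 2^9 = 512-byte intervals
 *              .flags          = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
 *      };
 *
 *      blk_integrity_register(disk, &bi);
 */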
/**
 * bio_integrity_intervals - Return number of integrity intervals for a bio
 * @bi:         blk_integrity profile for device
 * @sectors:    Size of the bio in 512-byte sectors
 *
 * Description: The block layer calculates everything in 512 byte
 * sectors but integrity metadata is done in terms of the data integrity
 * interval size of the storage device.  Convert the block layer sectors
 * to the appropriate number of integrity intervals.
 */
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                   unsigned int sectors)
{
        return sectors >> (bi->interval_exp - 9);
}

static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
                                               unsigned int sectors)
{
        return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}

/*
 * Return the first bvec that contains integrity data.  Only drivers that are
 * limited to a single integrity segment should use this helper.
 */
static inline struct bio_vec *rq_integrity_vec(struct request *rq)
{
        if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
                return NULL;
        return rq->bio->bi_integrity->bip_vec;
}
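/*
 * Worked example (illustrative, not part of this header): for a device
 * with 4096-byte integrity intervals (interval_exp = 12) and 8-byte
 * tuples, a 32-sector (16 KiB) bio carries:
 *
 *      bio_integrity_intervals(bi, 32)  ->  32 >> (12 - 9) = 4 intervals
 *      bio_integrity_bytes(bi, 32)      ->  4 * 8 = 32 bytes of metadata
 */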
#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
        return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
                                            struct bio *b)
{
        return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
                                          struct bio *b,
                                          struct scatterlist *s)
{
        return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
        return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
        return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
                                          struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
{
        return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                                          struct request *r1,
                                          struct request *r2)
{
        return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                                           struct request *r,
                                           struct bio *b)
{
        return true;
}
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                   unsigned int sectors)
{
        return 0;
}

static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
                                               unsigned int sectors)
{
        return 0;
}

static inline struct bio_vec *rq_integrity_vec(struct request *rq)
{
        return NULL;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        void (*release) (struct gendisk *, fmode_t);
        int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        unsigned int (*check_events) (struct gendisk *disk,
                                      unsigned int clearing);
        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        /* this callback is called with swap_lock and sometimes the page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        int (*report_zones)(struct gendisk *, sector_t sector,
                            struct blk_zone *zones, unsigned int *nr_zones);
        struct module *owner;
        const struct pr_ops *pr_ops;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
                           struct writeback_control *);
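/*
 * Illustrative sketch (hypothetical driver code, not part of this header;
 * mydrv_open/mydrv_release/mydrv_getgeo are made-up callbacks): a minimal
 * block driver wires up its operations like this, leaving all unused
 * callbacks NULL.
 *
 *      static const struct block_device_operations mydrv_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = mydrv_open,
 *              .release        = mydrv_release,
 *              .getgeo         = mydrv_getgeo,
 *      };
 *
 *      // later: disk->fops = &mydrv_fops;
 */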
#ifdef CONFIG_BLK_DEV_ZONED
bool blk_req_needs_zone_write_lock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
        if (blk_req_needs_zone_write_lock(rq))
                __blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
        if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
                __blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return rq->q->seq_zones_wlock &&
                test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        if (!blk_req_needs_zone_write_lock(rq))
                return true;
        return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
        return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}
static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */
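/*
 * Illustrative sketch (hypothetical dispatch code, not part of this
 * header): an I/O scheduler or driver serializing writes to a sequential
 * zone with the helpers above.
 *
 *      if (!blk_req_can_dispatch_to_zone(rq))
 *              return false;           // zone already write-locked, defer
 *      blk_req_zone_write_lock(rq);    // no-op unless the request needs it
 *      // ...dispatch rq to the device...
 *      // on completion: blk_req_zone_write_unlock(rq);
 */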
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
        return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                                     sector_t *error_sector)
{
        return 0;
}

#endif /* CONFIG_BLOCK */

static inline void blk_wake_io_task(struct task_struct *waiter)
{
        /*
         * If we're polling, the task itself is doing the completions.  For
         * that case, we don't need to signal a wakeup, it's enough to just
         * mark us as RUNNING.
         */
        if (waiter == current)
                __set_current_state(TASK_RUNNING);
        else
                wake_up_process(waiter);
}

#endif /* _LINUX_BLKDEV_H */