/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_keyslot_manager;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		5

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};
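/*
 * Illustrative sketch (not part of the upstream header): the normal blk-mq
 * request life cycle walks MQ_RQ_IDLE -> MQ_RQ_IN_FLIGHT -> MQ_RQ_COMPLETE.
 * A hypothetical tracer or debug helper could name the states like this;
 * the "example_" identifier below is an assumption, not an upstream API.
 */
static inline const char *example_mq_rq_state_name(enum mq_rq_state state)
{
	switch (state) {
	case MQ_RQ_IDLE:	return "idle";		/* free, not bound to an I/O */
	case MQ_RQ_IN_FLIGHT:	return "in_flight";	/* owned by the driver */
	case MQ_RQ_COMPLETE:	return "complete";	/* completion has started */
	}
	return "invalid";
}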
/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct list_head queuelist;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count; /* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_ksm_keyslot *crypt_keyslot;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	refcount_t ref;

	unsigned int timeout;
	unsigned long deadline;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);

struct bio_vec;

enum blk_eh_timer_return {
	BLK_EH_DONE,		/* driver has completed the command */
	BLK_EH_RESET_TIMER,	/* reset timer and try again */
};
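/*
 * Illustrative sketch (hypothetical driver, assuming the usual blk-mq
 * ->timeout() callback shape): passthrough submitters handle their own
 * retries, so report those as handled and give everything else another
 * timer interval.  The "example_" name is an assumption.
 */
static inline enum blk_eh_timer_return example_timeout(struct request *rq,
						       bool reserved)
{
	if (blk_rq_is_passthrough(rq))
		return BLK_EH_DONE;	/* let the submitter deal with it */
	return BLK_EH_RESET_TIMER;	/* rearm the timer, try again */
}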
enum blk_queue_state {
	Queue_down,
	Queue_up,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};
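/*
 * Illustrative sketch (hypothetical values, not an upstream API): a "512e"
 * disk exposes 512-byte logical blocks on top of 4096-byte physical blocks,
 * so its limits would look roughly like this.  Real drivers populate these
 * via the blk_queue_*() setters declared further down rather than by hand.
 */
static inline void example_fill_512e_limits(struct queue_limits *lim)
{
	lim->logical_block_size = 512;	/* smallest addressable unit */
	lim->physical_block_size = 4096;/* atom of the underlying media */
	lim->io_min = 4096;		/* smaller writes imply read-modify-write */
	lim->alignment_offset = 0;	/* LBA 0 starts on a physical boundary */
}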
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					  fmode_t mode, unsigned int cmd,
					  unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
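/*
 * Illustrative sketch (hypothetical caller, CONFIG_BLK_DEV_ZONED): resetting
 * the write pointer of the single zone containing @sector with
 * blkdev_zone_mgmt().  Assumes a power-of-2 zone size in sectors, which the
 * zone-number helpers further down also rely on; "example_" is an assumption.
 */
static inline int example_reset_zone_at(struct block_device *bdev,
					sector_t sector, sector_t zone_sectors)
{
	sector_t zone_start = sector & ~(zone_sectors - 1);

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
				zone_sectors, GFP_KERNEL);
}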
struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	make_request_fn		*make_request_fn;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	struct backing_dev_info	*backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
	 * processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	spinlock_t		queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	/* Inline crypto capabilities */
	struct blk_keyslot_manager *ksm;
#endif

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
	struct mutex		blk_trace_mutex;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;
	struct percpu_ref	q_usage_counter;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

	size_t			cmd_size;

	struct work_struct	release_work;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];
};

#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS	21	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 23	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif
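/*
 * Illustrative sketch (hypothetical driver init): a driver for a
 * non-rotational device would typically advertise that fact, and opt out
 * of feeding the entropy pool, with the flag helpers above.  The
 * "example_" name is an assumption.
 */
static inline void example_mark_ssd(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);	/* no seek penalty */
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);	/* not a randomness source */
}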
#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
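/*
 * Illustrative sketch: per the enum further up, host-aware (HA) devices
 * accept random writes to sequential zones while host-managed (HM) devices
 * reject them, so "must I write sequentially?" reduces to a model check.
 * The "example_" name is an assumption, not an upstream API.
 */
static inline bool example_requires_sequential_writes(struct request_queue *q)
{
	return blk_queue_zoned_model(q) == BLK_ZONED_HM;
}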
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return false;
	if (!q->conv_zones_bitmap)
		return true;
	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	return false;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
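/*
 * Worked example (illustrative): the zone size equals chunk_sectors and is
 * a power of two, so with 256 MiB zones chunk_sectors is 524288 (1 << 19)
 * and sector 1572864 maps to zone 1572864 >> 19 == 3.  The "example_"
 * helper below is an assumption, not an upstream API.
 */
static inline unsigned int example_zone_of_bio(struct request_queue *q,
					       struct bio *bio)
{
	return blk_queue_zone_no(q, bio->bi_iter.bi_sector);
}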
static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
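/*
 * Illustrative sketch: walking a request's data with the iterator macros
 * above.  Summing bv_len over all segments recovers the same total that
 * blk_rq_bytes() (declared further down) returns directly.  The "example_"
 * name is an assumption.
 */
static inline unsigned int example_count_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}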
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
				       blk_mq_req_flags_t flags);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
					      struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern void blk_execute_rq(struct request_queue *, struct gendisk *,
			   struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
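/*
 * Illustrative sketch (hypothetical caller): synchronously issuing a
 * driver-private command with the request helpers declared above.
 * IS_ERR()/PTR_ERR() come from <linux/err.h>; the "example_" name is an
 * assumption.
 */
static inline int example_issue_drv_cmd(struct request_queue *q,
					struct gendisk *disk)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_execute_rq(q, disk, rq, 0);	/* 0: not at head; waits for completion */
	blk_put_request(rq);
	return 0;
}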
/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}
static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check
 * that there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}
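/*
 * Worked example (illustrative): a request at byte offset 1 MiB with 8 KiB
 * left has blk_rq_pos() == 2048 (1 MiB >> SECTOR_SHIFT) and
 * blk_rq_sectors() == 16, so the helper below would return 2064.  The
 * "example_" name is an assumption, not an upstream API.
 */
static inline sector_t example_rq_end_sector(const struct request *rq)
{
	/* first sector past the request, using the accessors above */
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}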
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1))));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);
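/*
 * Worked example (illustrative): with chunk_sectors == 256 and
 * max_sectors == 1024, an I/O starting at sector offset 250 may cover at
 * most 256 - (250 & 255) == 6 sectors before crossing a chunk boundary, so
 * blk_max_size_offset() returns min(1024, 6) == 6.  A hypothetical
 * submitter could clamp its length accordingly; "example_" is an assumption.
 */
static inline unsigned int example_clamp_io_sectors(struct request_queue *q,
						    sector_t offset,
						    unsigned int nr_sectors)
{
	return min(nr_sectors, blk_max_size_offset(q, offset));
}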
10771da177e4SLinus Torvalds */ 10782a842acaSChristoph Hellwig extern bool blk_update_request(struct request *rq, blk_status_t error, 107922b13210SJens Axboe unsigned int nr_bytes); 10802e60e022STejun Heo 1081242f9dcbSJens Axboe extern void __blk_complete_request(struct request *); 1082242f9dcbSJens Axboe extern void blk_abort_request(struct request *); 1083ff856badSJens Axboe 10841da177e4SLinus Torvalds /* 10851da177e4SLinus Torvalds * Access functions for manipulating queue properties 10861da177e4SLinus Torvalds */ 1087165125e1SJens Axboe extern void blk_cleanup_queue(struct request_queue *); 1088165125e1SJens Axboe extern void blk_queue_bounce_limit(struct request_queue *, u64); 1089086fa5ffSMartin K. Petersen extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 1090762380adSJens Axboe extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); 10918a78362cSMartin K. Petersen extern void blk_queue_max_segments(struct request_queue *, unsigned short); 10921e739730SChristoph Hellwig extern void blk_queue_max_discard_segments(struct request_queue *, 10931e739730SChristoph Hellwig unsigned short); 1094165125e1SJens Axboe extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 109567efc925SChristoph Hellwig extern void blk_queue_max_discard_sectors(struct request_queue *q, 109667efc925SChristoph Hellwig unsigned int max_discard_sectors); 10974363ac7cSMartin K. Petersen extern void blk_queue_max_write_same_sectors(struct request_queue *q, 10984363ac7cSMartin K. Petersen unsigned int max_write_same_sectors); 1099a6f0788eSChaitanya Kulkarni extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, 1100a6f0788eSChaitanya Kulkarni unsigned int max_write_zeroes_sectors); 1101ad6bf88aSMikulas Patocka extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); 11020512a75bSKeith Busch extern void blk_queue_max_zone_append_sectors(struct request_queue *q, 11030512a75bSKeith Busch unsigned int max_zone_append_sectors); 1104892b6f90SMartin K. Petersen extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); 1105c72758f3SMartin K. Petersen extern void blk_queue_alignment_offset(struct request_queue *q, 1106c72758f3SMartin K. Petersen unsigned int alignment); 11077c958e32SMartin K. Petersen extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); 1108c72758f3SMartin K. Petersen extern void blk_queue_io_min(struct request_queue *q, unsigned int min); 11093c5820c7SMartin K. Petersen extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); 1110c72758f3SMartin K. Petersen extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); 1111d278d4a8SJens Axboe extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); 1112e475bba2SMartin K. Petersen extern void blk_set_default_limits(struct queue_limits *lim); 1113b1bd055dSMartin K. Petersen extern void blk_set_stacking_limits(struct queue_limits *lim); 1114c72758f3SMartin K. Petersen extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 1115c72758f3SMartin K. Petersen sector_t offset); 111617be8c24SMartin K. Petersen extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, 111717be8c24SMartin K. Petersen sector_t offset); 1118c72758f3SMartin K. Petersen extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 1119c72758f3SMartin K.
Petersen sector_t offset); 1120165125e1SJens Axboe extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 112127f8221aSFUJITA Tomonori extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); 1122165125e1SJens Axboe extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 112303100aadSKeith Busch extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); 1124165125e1SJens Axboe extern void blk_queue_dma_alignment(struct request_queue *, int); 112511c3e689SJames Bottomley extern void blk_queue_update_dma_alignment(struct request_queue *, int); 1126242f9dcbSJens Axboe extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 112793e9d8e8SJens Axboe extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); 112868c43f13SDamien Le Moal extern void blk_queue_required_elevator_features(struct request_queue *q, 112968c43f13SDamien Le Moal unsigned int features); 113045147fb5SYoshihiro Shimoda extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, 113145147fb5SYoshihiro Shimoda struct device *dev); 11321da177e4SLinus Torvalds 11331e739730SChristoph Hellwig /* 11341e739730SChristoph Hellwig * Number of physical segments as sent to the device. 11351e739730SChristoph Hellwig * 11361e739730SChristoph Hellwig * Normally this is the number of discontiguous data segments sent by the 11371e739730SChristoph Hellwig * submitter. But for data-less commands like discard we might have no 11381e739730SChristoph Hellwig * actual data segments submitted, but the driver might have to add its 11391e739730SChristoph Hellwig * own special payload. In that case we still return 1 here so that this 11401e739730SChristoph Hellwig * special payload will be mapped. 11411e739730SChristoph Hellwig */ 1142f9d03f96SChristoph Hellwig static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) 1143f9d03f96SChristoph Hellwig { 1144f9d03f96SChristoph Hellwig if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1145f9d03f96SChristoph Hellwig return 1; 1146f9d03f96SChristoph Hellwig return rq->nr_phys_segments; 1147f9d03f96SChristoph Hellwig } 1148f9d03f96SChristoph Hellwig 11491e739730SChristoph Hellwig /* 11501e739730SChristoph Hellwig * Number of discard segments (or ranges) the driver needs to fill in. 11511e739730SChristoph Hellwig * Each discard bio merged into a request is counted as one segment.
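 *
 * A driver that fills one range descriptor per segment might iterate as
 * follows (a sketch; the range[] descriptor array is hypothetical,
 * per-driver hardware state):
 *
 *	unsigned short n = 0;
 *	struct bio *bio;
 *
 *	__rq_for_each_bio(bio, rq) {
 *		range[n].sector = bio->bi_iter.bi_sector;
 *		range[n].nr_sectors = bio_sectors(bio);
 *		n++;
 *	}
 *	WARN_ON_ONCE(n != blk_rq_nr_discard_segments(rq));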
11521e739730SChristoph Hellwig */ 11531e739730SChristoph Hellwig static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) 11541e739730SChristoph Hellwig { 11551e739730SChristoph Hellwig return max_t(unsigned short, rq->nr_phys_segments, 1); 11561e739730SChristoph Hellwig } 11571e739730SChristoph Hellwig 115889de1504SChristoph Hellwig int __blk_rq_map_sg(struct request_queue *q, struct request *rq, 115989de1504SChristoph Hellwig struct scatterlist *sglist, struct scatterlist **last_sg); 116089de1504SChristoph Hellwig static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, 116189de1504SChristoph Hellwig struct scatterlist *sglist) 116289de1504SChristoph Hellwig { 116389de1504SChristoph Hellwig struct scatterlist *last_sg = NULL; 116489de1504SChristoph Hellwig 116589de1504SChristoph Hellwig return __blk_rq_map_sg(q, rq, sglist, &last_sg); 116689de1504SChristoph Hellwig } 11671da177e4SLinus Torvalds extern void blk_dump_rq_flags(struct request *, char *); 11681da177e4SLinus Torvalds extern long nr_blockdev_pages(void); 11691da177e4SLinus Torvalds 117009ac46c4STejun Heo bool __must_check blk_get_queue(struct request_queue *); 11713d745ea5SChristoph Hellwig struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); 1172165125e1SJens Axboe extern void blk_put_queue(struct request_queue *); 11733f21c265SJens Axboe extern void blk_set_queue_dying(struct request_queue *); 11741da177e4SLinus Torvalds 1175316cc67dSShaohua Li /* 117675df7136SSuresh Jayaraman * blk_plug permits building a queue of related requests by holding the I/O 117775df7136SSuresh Jayaraman * fragments for a short period. This allows merging of sequential requests 117875df7136SSuresh Jayaraman * into a single larger request. As the requests are moved from a per-task list to 117975df7136SSuresh Jayaraman * the device's request_queue in a batch, this results in improved scalability 118075df7136SSuresh Jayaraman * as contention on the request_queue lock is reduced. 118175df7136SSuresh Jayaraman * 118275df7136SSuresh Jayaraman * It is ok not to disable preemption when adding the request to the plug list 118375df7136SSuresh Jayaraman * or when attempting a merge, because blk_schedule_flush_plug() will only flush 118475df7136SSuresh Jayaraman * the plug list when the task sleeps by itself. For details, please see 118575df7136SSuresh Jayaraman * schedule() where blk_schedule_flush_plug() is called.
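 *
 * Typical usage when submitting a batch of bios (a sketch;
 * next_bio_to_submit() stands in for whatever produces the bios):
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio_to_submit()))
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);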
1186316cc67dSShaohua Li */ 118773c10101SJens Axboe struct blk_plug { 1188320ae51fSJens Axboe struct list_head mq_list; /* blk-mq requests */ 118975df7136SSuresh Jayaraman struct list_head cb_list; /* md requires an unplug callback */ 11905f0ed774SJens Axboe unsigned short rq_count; 1191ce5b009cSJens Axboe bool multiple_queues; 119273c10101SJens Axboe }; 119355c022bbSShaohua Li #define BLK_MAX_REQUEST_COUNT 16 119450d24c34SShaohua Li #define BLK_PLUG_FLUSH_SIZE (128 * 1024) 119555c022bbSShaohua Li 11969cbb1750SNeilBrown struct blk_plug_cb; 119774018dc3SNeilBrown typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); 1198048c9374SNeilBrown struct blk_plug_cb { 1199048c9374SNeilBrown struct list_head list; 12009cbb1750SNeilBrown blk_plug_cb_fn callback; 12019cbb1750SNeilBrown void *data; 1202048c9374SNeilBrown }; 12039cbb1750SNeilBrown extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, 12049cbb1750SNeilBrown void *data, int size); 120573c10101SJens Axboe extern void blk_start_plug(struct blk_plug *); 120673c10101SJens Axboe extern void blk_finish_plug(struct blk_plug *); 1207f6603783SJens Axboe extern void blk_flush_plug_list(struct blk_plug *, bool); 120873c10101SJens Axboe 120973c10101SJens Axboe static inline void blk_flush_plug(struct task_struct *tsk) 121073c10101SJens Axboe { 121173c10101SJens Axboe struct blk_plug *plug = tsk->plug; 121273c10101SJens Axboe 121388b996cdSChristoph Hellwig if (plug) 1214a237c1c5SJens Axboe blk_flush_plug_list(plug, false); 1215a237c1c5SJens Axboe } 1216a237c1c5SJens Axboe 1217a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *tsk) 1218a237c1c5SJens Axboe { 1219a237c1c5SJens Axboe struct blk_plug *plug = tsk->plug; 1220a237c1c5SJens Axboe 1221a237c1c5SJens Axboe if (plug) 1222f6603783SJens Axboe blk_flush_plug_list(plug, true); 122373c10101SJens Axboe } 122473c10101SJens Axboe 122573c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk) 122673c10101SJens Axboe { 122773c10101SJens Axboe struct blk_plug *plug = tsk->plug; 122873c10101SJens Axboe 1229320ae51fSJens Axboe return plug && 1230a1ce35faSJens Axboe (!list_empty(&plug->mq_list) || 1231320ae51fSJens Axboe !list_empty(&plug->cb_list)); 123273c10101SJens Axboe } 123373c10101SJens Axboe 123471ac860aSMing Lei extern void blk_io_schedule(void); 123571ac860aSMing Lei 1236ee472d83SChristoph Hellwig extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); 1237ee472d83SChristoph Hellwig extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, 1238ee472d83SChristoph Hellwig sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1239e950fdf7SChristoph Hellwig 1240e950fdf7SChristoph Hellwig #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ 1241dd3932edSChristoph Hellwig 1242fbd9b09aSDmitry Monakhov extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1243fbd9b09aSDmitry Monakhov sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 124438f25255SChristoph Hellwig extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1245288dab8aSChristoph Hellwig sector_t nr_sects, gfp_t gfp_mask, int flags, 1246469e3216SMike Christie struct bio **biop); 1247ee472d83SChristoph Hellwig 1248ee472d83SChristoph Hellwig #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ 1249cb365b96SChristoph Hellwig #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ 1250ee472d83SChristoph Hellwig 1251e73c23ffSChaitanya Kulkarni extern 
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1252e73c23ffSChaitanya Kulkarni sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, 1253ee472d83SChristoph Hellwig unsigned flags); 12543f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1255ee472d83SChristoph Hellwig sector_t nr_sects, gfp_t gfp_mask, unsigned flags); 1256ee472d83SChristoph Hellwig 12572cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block, 12582cf6d26aSChristoph Hellwig sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) 1259fb2dce86SDavid Woodhouse { 1260233bde21SBart Van Assche return blkdev_issue_discard(sb->s_bdev, 1261233bde21SBart Van Assche block << (sb->s_blocksize_bits - 1262233bde21SBart Van Assche SECTOR_SHIFT), 1263233bde21SBart Van Assche nr_blocks << (sb->s_blocksize_bits - 1264233bde21SBart Van Assche SECTOR_SHIFT), 12652cf6d26aSChristoph Hellwig gfp_mask, flags); 1266fb2dce86SDavid Woodhouse } 1267e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, 1268a107e5a3STheodore Ts'o sector_t nr_blocks, gfp_t gfp_mask) 1269e6fa0be6SLukas Czerner { 1270e6fa0be6SLukas Czerner return blkdev_issue_zeroout(sb->s_bdev, 1271233bde21SBart Van Assche block << (sb->s_blocksize_bits - 1272233bde21SBart Van Assche SECTOR_SHIFT), 1273233bde21SBart Van Assche nr_blocks << (sb->s_blocksize_bits - 1274233bde21SBart Van Assche SECTOR_SHIFT), 1275ee472d83SChristoph Hellwig gfp_mask, 0); 1276e6fa0be6SLukas Czerner } 12771da177e4SLinus Torvalds 1278f00c4d80SChristoph Hellwig extern int blk_verify_command(unsigned char *cmd, fmode_t mode); 12790b07de85SAdel Gadllah 1280eb28d31bSMartin K. Petersen enum blk_default_limits { 1281eb28d31bSMartin K. Petersen BLK_MAX_SEGMENTS = 128, 1282eb28d31bSMartin K. Petersen BLK_SAFE_MAX_SECTORS = 255, 1283d2be537cSJeff Moyer BLK_DEF_MAX_SECTORS = 2560, 1284eb28d31bSMartin K. Petersen BLK_MAX_SEGMENT_SIZE = 65536, 1285eb28d31bSMartin K. Petersen BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1286eb28d31bSMartin K. Petersen }; 12870e435ac2SMilan Broz 1288af2c68feSBart Van Assche static inline unsigned long queue_segment_boundary(const struct request_queue *q) 1289ae03bf63SMartin K. Petersen { 1290025146e1SMartin K. Petersen return q->limits.seg_boundary_mask; 1291ae03bf63SMartin K. Petersen } 1292ae03bf63SMartin K. Petersen 1293af2c68feSBart Van Assche static inline unsigned long queue_virt_boundary(const struct request_queue *q) 129403100aadSKeith Busch { 129503100aadSKeith Busch return q->limits.virt_boundary_mask; 129603100aadSKeith Busch } 129703100aadSKeith Busch 1298af2c68feSBart Van Assche static inline unsigned int queue_max_sectors(const struct request_queue *q) 1299ae03bf63SMartin K. Petersen { 1300025146e1SMartin K. Petersen return q->limits.max_sectors; 1301ae03bf63SMartin K. Petersen } 1302ae03bf63SMartin K. Petersen 1303af2c68feSBart Van Assche static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) 1304ae03bf63SMartin K. Petersen { 1305025146e1SMartin K. Petersen return q->limits.max_hw_sectors; 1306ae03bf63SMartin K. Petersen } 1307ae03bf63SMartin K. Petersen 1308af2c68feSBart Van Assche static inline unsigned short queue_max_segments(const struct request_queue *q) 1309ae03bf63SMartin K. Petersen { 13108a78362cSMartin K. Petersen return q->limits.max_segments; 1311ae03bf63SMartin K. Petersen } 1312ae03bf63SMartin K. 
Petersen 1313af2c68feSBart Van Assche static inline unsigned short queue_max_discard_segments(const struct request_queue *q) 13141e739730SChristoph Hellwig { 13151e739730SChristoph Hellwig return q->limits.max_discard_segments; 13161e739730SChristoph Hellwig } 13171e739730SChristoph Hellwig 1318af2c68feSBart Van Assche static inline unsigned int queue_max_segment_size(const struct request_queue *q) 1319ae03bf63SMartin K. Petersen { 1320025146e1SMartin K. Petersen return q->limits.max_segment_size; 1321ae03bf63SMartin K. Petersen } 1322ae03bf63SMartin K. Petersen 13230512a75bSKeith Busch static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) 13240512a75bSKeith Busch { 13250512a75bSKeith Busch return q->limits.max_zone_append_sectors; 13260512a75bSKeith Busch } 13270512a75bSKeith Busch 1328ad6bf88aSMikulas Patocka static inline unsigned queue_logical_block_size(const struct request_queue *q) 13291da177e4SLinus Torvalds { 13301da177e4SLinus Torvalds int retval = 512; 13311da177e4SLinus Torvalds 1332025146e1SMartin K. Petersen if (q && q->limits.logical_block_size) 1333025146e1SMartin K. Petersen retval = q->limits.logical_block_size; 13341da177e4SLinus Torvalds 13351da177e4SLinus Torvalds return retval; 13361da177e4SLinus Torvalds } 13371da177e4SLinus Torvalds 1338ad6bf88aSMikulas Patocka static inline unsigned int bdev_logical_block_size(struct block_device *bdev) 13391da177e4SLinus Torvalds { 1340e1defc4fSMartin K. Petersen return queue_logical_block_size(bdev_get_queue(bdev)); 13411da177e4SLinus Torvalds } 13421da177e4SLinus Torvalds 1343af2c68feSBart Van Assche static inline unsigned int queue_physical_block_size(const struct request_queue *q) 1344c72758f3SMartin K. Petersen { 1345c72758f3SMartin K. Petersen return q->limits.physical_block_size; 1346c72758f3SMartin K. Petersen } 1347c72758f3SMartin K. Petersen 1348892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev) 1349ac481c20SMartin K. Petersen { 1350ac481c20SMartin K. Petersen return queue_physical_block_size(bdev_get_queue(bdev)); 1351ac481c20SMartin K. Petersen } 1352ac481c20SMartin K. Petersen 1353af2c68feSBart Van Assche static inline unsigned int queue_io_min(const struct request_queue *q) 1354c72758f3SMartin K. Petersen { 1355c72758f3SMartin K. Petersen return q->limits.io_min; 1356c72758f3SMartin K. Petersen } 1357c72758f3SMartin K. Petersen 1358ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev) 1359ac481c20SMartin K. Petersen { 1360ac481c20SMartin K. Petersen return queue_io_min(bdev_get_queue(bdev)); 1361ac481c20SMartin K. Petersen } 1362ac481c20SMartin K. Petersen 1363af2c68feSBart Van Assche static inline unsigned int queue_io_opt(const struct request_queue *q) 1364c72758f3SMartin K. Petersen { 1365c72758f3SMartin K. Petersen return q->limits.io_opt; 1366c72758f3SMartin K. Petersen } 1367c72758f3SMartin K. Petersen 1368ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev) 1369ac481c20SMartin K. Petersen { 1370ac481c20SMartin K. Petersen return queue_io_opt(bdev_get_queue(bdev)); 1371ac481c20SMartin K. Petersen } 1372ac481c20SMartin K. Petersen 1373af2c68feSBart Van Assche static inline int queue_alignment_offset(const struct request_queue *q) 1374c72758f3SMartin K. Petersen { 1375ac481c20SMartin K. Petersen if (q->limits.misaligned) 1376c72758f3SMartin K. Petersen return -1; 1377c72758f3SMartin K. Petersen 1378c72758f3SMartin K. 
Petersen return q->limits.alignment_offset; 1379c72758f3SMartin K. Petersen } 1380c72758f3SMartin K. Petersen 1381e03a72e1SMartin K. Petersen static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) 138281744ee4SMartin K. Petersen { 138381744ee4SMartin K. Petersen unsigned int granularity = max(lim->physical_block_size, lim->io_min); 1384233bde21SBart Van Assche unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) 1385233bde21SBart Van Assche << SECTOR_SHIFT; 138681744ee4SMartin K. Petersen 1387b8839b8cSMike Snitzer return (granularity + lim->alignment_offset - alignment) % granularity; 1388c72758f3SMartin K. Petersen } 1389c72758f3SMartin K. Petersen 1390ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev) 1391ac481c20SMartin K. Petersen { 1392ac481c20SMartin K. Petersen struct request_queue *q = bdev_get_queue(bdev); 1393ac481c20SMartin K. Petersen 1394ac481c20SMartin K. Petersen if (q->limits.misaligned) 1395ac481c20SMartin K. Petersen return -1; 1396ac481c20SMartin K. Petersen 1397ac481c20SMartin K. Petersen if (bdev != bdev->bd_contains) 1398ac481c20SMartin K. Petersen return bdev->bd_part->alignment_offset; 1399ac481c20SMartin K. Petersen 1400ac481c20SMartin K. Petersen return q->limits.alignment_offset; 1401ac481c20SMartin K. Petersen } 1402ac481c20SMartin K. Petersen 1403af2c68feSBart Van Assche static inline int queue_discard_alignment(const struct request_queue *q) 140486b37281SMartin K. Petersen { 140586b37281SMartin K. Petersen if (q->limits.discard_misaligned) 140686b37281SMartin K. Petersen return -1; 140786b37281SMartin K. Petersen 140886b37281SMartin K. Petersen return q->limits.discard_alignment; 140986b37281SMartin K. Petersen } 141086b37281SMartin K. Petersen 1411e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 141286b37281SMartin K. Petersen { 141359771079SLinus Torvalds unsigned int alignment, granularity, offset; 1414dd3d145dSMartin K. Petersen 1415a934a00aSMartin K. Petersen if (!lim->max_discard_sectors) 1416a934a00aSMartin K. Petersen return 0; 1417a934a00aSMartin K. Petersen 141859771079SLinus Torvalds /* Why are these in bytes, not sectors? */ 1419233bde21SBart Van Assche alignment = lim->discard_alignment >> SECTOR_SHIFT; 1420233bde21SBart Van Assche granularity = lim->discard_granularity >> SECTOR_SHIFT; 142159771079SLinus Torvalds if (!granularity) 142259771079SLinus Torvalds return 0; 142359771079SLinus Torvalds 142459771079SLinus Torvalds /* Offset of the partition start in 'granularity' sectors */ 142559771079SLinus Torvalds offset = sector_div(sector, granularity); 142659771079SLinus Torvalds 142759771079SLinus Torvalds /* And why do we do this modulus *again* in blkdev_issue_discard()? */ 142859771079SLinus Torvalds offset = (granularity + alignment - offset) % granularity; 142959771079SLinus Torvalds 143059771079SLinus Torvalds /* Turn it back into bytes, gaah */ 1431233bde21SBart Van Assche return offset << SECTOR_SHIFT; 143286b37281SMartin K. Petersen } 143386b37281SMartin K. 
Petersen 1434c6e66634SPaolo Bonzini static inline int bdev_discard_alignment(struct block_device *bdev) 1435c6e66634SPaolo Bonzini { 1436c6e66634SPaolo Bonzini struct request_queue *q = bdev_get_queue(bdev); 1437c6e66634SPaolo Bonzini 1438c6e66634SPaolo Bonzini if (bdev != bdev->bd_contains) 1439c6e66634SPaolo Bonzini return bdev->bd_part->discard_alignment; 1440c6e66634SPaolo Bonzini 1441c6e66634SPaolo Bonzini return q->limits.discard_alignment; 1442c6e66634SPaolo Bonzini } 1443c6e66634SPaolo Bonzini 14444363ac7cSMartin K. Petersen static inline unsigned int bdev_write_same(struct block_device *bdev) 14454363ac7cSMartin K. Petersen { 14464363ac7cSMartin K. Petersen struct request_queue *q = bdev_get_queue(bdev); 14474363ac7cSMartin K. Petersen 14484363ac7cSMartin K. Petersen if (q) 14494363ac7cSMartin K. Petersen return q->limits.max_write_same_sectors; 14504363ac7cSMartin K. Petersen 14514363ac7cSMartin K. Petersen return 0; 14524363ac7cSMartin K. Petersen } 14534363ac7cSMartin K. Petersen 1454a6f0788eSChaitanya Kulkarni static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) 1455a6f0788eSChaitanya Kulkarni { 1456a6f0788eSChaitanya Kulkarni struct request_queue *q = bdev_get_queue(bdev); 1457a6f0788eSChaitanya Kulkarni 1458a6f0788eSChaitanya Kulkarni if (q) 1459a6f0788eSChaitanya Kulkarni return q->limits.max_write_zeroes_sectors; 1460a6f0788eSChaitanya Kulkarni 1461a6f0788eSChaitanya Kulkarni return 0; 1462a6f0788eSChaitanya Kulkarni } 1463a6f0788eSChaitanya Kulkarni 1464797476b8SDamien Le Moal static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) 1465797476b8SDamien Le Moal { 1466797476b8SDamien Le Moal struct request_queue *q = bdev_get_queue(bdev); 1467797476b8SDamien Le Moal 1468797476b8SDamien Le Moal if (q) 1469797476b8SDamien Le Moal return blk_queue_zoned_model(q); 1470797476b8SDamien Le Moal 1471797476b8SDamien Le Moal return BLK_ZONED_NONE; 1472797476b8SDamien Le Moal } 1473797476b8SDamien Le Moal 1474797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev) 1475797476b8SDamien Le Moal { 1476797476b8SDamien Le Moal struct request_queue *q = bdev_get_queue(bdev); 1477797476b8SDamien Le Moal 1478797476b8SDamien Le Moal if (q) 1479797476b8SDamien Le Moal return blk_queue_is_zoned(q); 1480797476b8SDamien Le Moal 1481797476b8SDamien Le Moal return false; 1482797476b8SDamien Le Moal } 1483797476b8SDamien Le Moal 1484113ab72eSDamien Le Moal static inline sector_t bdev_zone_sectors(struct block_device *bdev) 14856a0cb1bcSHannes Reinecke { 14866a0cb1bcSHannes Reinecke struct request_queue *q = bdev_get_queue(bdev); 14876a0cb1bcSHannes Reinecke 14886a0cb1bcSHannes Reinecke if (q) 1489f99e8648SDamien Le Moal return blk_queue_zone_sectors(q); 14906cc77e9cSChristoph Hellwig return 0; 14916cc77e9cSChristoph Hellwig } 14926a0cb1bcSHannes Reinecke 1493af2c68feSBart Van Assche static inline int queue_dma_alignment(const struct request_queue *q) 14941da177e4SLinus Torvalds { 1495482eb689SPete Wyckoff return q ? 
q->dma_alignment : 511; 14961da177e4SLinus Torvalds } 14971da177e4SLinus Torvalds 149814417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, 149987904074SFUJITA Tomonori unsigned int len) 150087904074SFUJITA Tomonori { 150187904074SFUJITA Tomonori unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; 150214417799SNamhyung Kim return !(addr & alignment) && !(len & alignment); 150387904074SFUJITA Tomonori } 150487904074SFUJITA Tomonori 15051da177e4SLinus Torvalds /* assumes size > 256 */ 15061da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size) 15071da177e4SLinus Torvalds { 15081da177e4SLinus Torvalds unsigned int bits = 8; 15091da177e4SLinus Torvalds do { 15101da177e4SLinus Torvalds bits++; 15111da177e4SLinus Torvalds size >>= 1; 15121da177e4SLinus Torvalds } while (size > 256); 15131da177e4SLinus Torvalds return bits; 15141da177e4SLinus Torvalds } 15151da177e4SLinus Torvalds 15162befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev) 15171da177e4SLinus Torvalds { 15181da177e4SLinus Torvalds return bdev->bd_block_size; 15191da177e4SLinus Torvalds } 15201da177e4SLinus Torvalds 152159c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work); 1522818cd1cbSJens Axboe int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 15251da177e4SLinus Torvalds MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 15261da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 15271da177e4SLinus Torvalds MODULE_ALIAS("block-major-" __stringify(major) "-*") 15281da177e4SLinus Torvalds 15297ba1ba12SMartin K. Petersen #if defined(CONFIG_BLK_DEV_INTEGRITY) 15307ba1ba12SMartin K. Petersen 15318288f496SMartin K. Petersen enum blk_integrity_flags { 15328288f496SMartin K. Petersen BLK_INTEGRITY_VERIFY = 1 << 0, 15338288f496SMartin K. Petersen BLK_INTEGRITY_GENERATE = 1 << 1, 15343aec2f41SMartin K. Petersen BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, 1535aae7df50SMartin K. Petersen BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, 15368288f496SMartin K. Petersen }; 15377ba1ba12SMartin K. Petersen 153818593088SMartin K. Petersen struct blk_integrity_iter { 15397ba1ba12SMartin K. Petersen void *prot_buf; 15407ba1ba12SMartin K. Petersen void *data_buf; 15413be91c4aSMartin K. Petersen sector_t seed; 15427ba1ba12SMartin K. Petersen unsigned int data_size; 15433be91c4aSMartin K. Petersen unsigned short interval; 15447ba1ba12SMartin K. Petersen const char *disk_name; 15457ba1ba12SMartin K. Petersen }; 15467ba1ba12SMartin K. Petersen 15474e4cbee9SChristoph Hellwig typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); 154854d4e6abSMax Gurtovoy typedef void (integrity_prepare_fn) (struct request *); 154954d4e6abSMax Gurtovoy typedef void (integrity_complete_fn) (struct request *, unsigned int); 15507ba1ba12SMartin K. Petersen 15510f8087ecSMartin K. Petersen struct blk_integrity_profile { 155218593088SMartin K. Petersen integrity_processing_fn *generate_fn; 155318593088SMartin K. Petersen integrity_processing_fn *verify_fn; 155454d4e6abSMax Gurtovoy integrity_prepare_fn *prepare_fn; 155554d4e6abSMax Gurtovoy integrity_complete_fn *complete_fn; 15560f8087ecSMartin K. Petersen const char *name; 15570f8087ecSMartin K. Petersen }; 15587ba1ba12SMartin K. Petersen 155925520d55SMartin K. 
Petersen extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); 15607ba1ba12SMartin K. Petersen extern void blk_integrity_unregister(struct gendisk *); 1561ad7fce93SMartin K. Petersen extern int blk_integrity_compare(struct gendisk *, struct gendisk *); 156213f05c8dSMartin K. Petersen extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, 156313f05c8dSMartin K. Petersen struct scatterlist *); 156413f05c8dSMartin K. Petersen extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); 15654eaf99beSMartin K. Petersen extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, 156613f05c8dSMartin K. Petersen struct request *); 15674eaf99beSMartin K. Petersen extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, 156813f05c8dSMartin K. Petersen struct bio *); 15697ba1ba12SMartin K. Petersen 157025520d55SMartin K. Petersen static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 157125520d55SMartin K. Petersen { 1572ac6fc48cSDan Williams struct blk_integrity *bi = &disk->queue->integrity; 157325520d55SMartin K. Petersen 157425520d55SMartin K. Petersen if (!bi->profile) 157525520d55SMartin K. Petersen return NULL; 157625520d55SMartin K. Petersen 157725520d55SMartin K. Petersen return bi; 157825520d55SMartin K. Petersen } 157925520d55SMartin K. Petersen 1580b04accc4SJens Axboe static inline 1581b04accc4SJens Axboe struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 1582b04accc4SJens Axboe { 158325520d55SMartin K. Petersen return blk_get_integrity(bdev->bd_disk); 1584b02739b0SMartin K. Petersen } 1585b02739b0SMartin K. Petersen 1586180b2f95SMartin K. Petersen static inline bool blk_integrity_rq(struct request *rq) 15877ba1ba12SMartin K. Petersen { 1588180b2f95SMartin K. Petersen return rq->cmd_flags & REQ_INTEGRITY; 15897ba1ba12SMartin K. Petersen } 15907ba1ba12SMartin K. Petersen 159113f05c8dSMartin K. Petersen static inline void blk_queue_max_integrity_segments(struct request_queue *q, 159213f05c8dSMartin K. Petersen unsigned int segs) 159313f05c8dSMartin K. Petersen { 159413f05c8dSMartin K. Petersen q->limits.max_integrity_segments = segs; 159513f05c8dSMartin K. Petersen } 159613f05c8dSMartin K. Petersen 159713f05c8dSMartin K. Petersen static inline unsigned short 1598af2c68feSBart Van Assche queue_max_integrity_segments(const struct request_queue *q) 159913f05c8dSMartin K. Petersen { 160013f05c8dSMartin K. Petersen return q->limits.max_integrity_segments; 160113f05c8dSMartin K. Petersen } 160213f05c8dSMartin K. Petersen 1603359f6427SGreg Edwards /** 1604359f6427SGreg Edwards * bio_integrity_intervals - Return number of integrity intervals for a bio 1605359f6427SGreg Edwards * @bi: blk_integrity profile for device 1606359f6427SGreg Edwards * @sectors: Size of the bio in 512-byte sectors 1607359f6427SGreg Edwards * 1608359f6427SGreg Edwards * Description: The block layer calculates everything in 512-byte 1609359f6427SGreg Edwards * sectors, but integrity metadata is done in terms of the data integrity 1610359f6427SGreg Edwards * interval size of the storage device. Convert the block layer sectors 1611359f6427SGreg Edwards * to the appropriate number of integrity intervals.
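 *
 * For example, with a 4096-byte integrity interval (interval_exp == 12),
 * a 32-sector (16 KiB) bio covers 32 >> (12 - 9) == 4 integrity intervals.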
1612359f6427SGreg Edwards */ 1613359f6427SGreg Edwards static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, 1614359f6427SGreg Edwards unsigned int sectors) 1615359f6427SGreg Edwards { 1616359f6427SGreg Edwards return sectors >> (bi->interval_exp - 9); 1617359f6427SGreg Edwards } 1618359f6427SGreg Edwards 1619359f6427SGreg Edwards static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, 1620359f6427SGreg Edwards unsigned int sectors) 1621359f6427SGreg Edwards { 1622359f6427SGreg Edwards return bio_integrity_intervals(bi, sectors) * bi->tuple_size; 1623359f6427SGreg Edwards } 1624359f6427SGreg Edwards 16252a876f5eSChristoph Hellwig /* 16262a876f5eSChristoph Hellwig * Return the first bvec that contains integrity data. Only drivers that are 16272a876f5eSChristoph Hellwig * limited to a single integrity segment should use this helper. 16282a876f5eSChristoph Hellwig */ 16292a876f5eSChristoph Hellwig static inline struct bio_vec *rq_integrity_vec(struct request *rq) 16302a876f5eSChristoph Hellwig { 16312a876f5eSChristoph Hellwig if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) 16322a876f5eSChristoph Hellwig return NULL; 16332a876f5eSChristoph Hellwig return rq->bio->bi_integrity->bip_vec; 16342a876f5eSChristoph Hellwig } 16352a876f5eSChristoph Hellwig 16367ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */ 16377ba1ba12SMartin K. Petersen 1638fd83240aSStephen Rothwell struct bio; 1639fd83240aSStephen Rothwell struct block_device; 1640fd83240aSStephen Rothwell struct gendisk; 1641fd83240aSStephen Rothwell struct blk_integrity; 1642fd83240aSStephen Rothwell 1643fd83240aSStephen Rothwell static inline int blk_integrity_rq(struct request *rq) 1644fd83240aSStephen Rothwell { 1645fd83240aSStephen Rothwell return 0; 1646fd83240aSStephen Rothwell } 1647fd83240aSStephen Rothwell static inline int blk_rq_count_integrity_sg(struct request_queue *q, 1648fd83240aSStephen Rothwell struct bio *b) 1649fd83240aSStephen Rothwell { 1650fd83240aSStephen Rothwell return 0; 1651fd83240aSStephen Rothwell } 1652fd83240aSStephen Rothwell static inline int blk_rq_map_integrity_sg(struct request_queue *q, 1653fd83240aSStephen Rothwell struct bio *b, 1654fd83240aSStephen Rothwell struct scatterlist *s) 1655fd83240aSStephen Rothwell { 1656fd83240aSStephen Rothwell return 0; 1657fd83240aSStephen Rothwell } 1658fd83240aSStephen Rothwell static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) 1659fd83240aSStephen Rothwell { 166061a04e5bSMichele Curti return NULL; 1661fd83240aSStephen Rothwell } 1662fd83240aSStephen Rothwell static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1663fd83240aSStephen Rothwell { 1664fd83240aSStephen Rothwell return NULL; 1665fd83240aSStephen Rothwell } 1666fd83240aSStephen Rothwell static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) 1667fd83240aSStephen Rothwell { 1668fd83240aSStephen Rothwell return 0; 1669fd83240aSStephen Rothwell } 167025520d55SMartin K. 
Petersen static inline void blk_integrity_register(struct gendisk *d, 1671fd83240aSStephen Rothwell struct blk_integrity *b) 1672fd83240aSStephen Rothwell { 1673fd83240aSStephen Rothwell } 1674fd83240aSStephen Rothwell static inline void blk_integrity_unregister(struct gendisk *d) 1675fd83240aSStephen Rothwell { 1676fd83240aSStephen Rothwell } 1677fd83240aSStephen Rothwell static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1678fd83240aSStephen Rothwell unsigned int segs) 1679fd83240aSStephen Rothwell { 1680fd83240aSStephen Rothwell } 1681af2c68feSBart Van Assche static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) 1682fd83240aSStephen Rothwell { 1683fd83240aSStephen Rothwell return 0; 1684fd83240aSStephen Rothwell } 16854eaf99beSMartin K. Petersen static inline bool blk_integrity_merge_rq(struct request_queue *rq, 1686fd83240aSStephen Rothwell struct request *r1, 1687fd83240aSStephen Rothwell struct request *r2) 1688fd83240aSStephen Rothwell { 1689cb1a5ab6SMartin K. Petersen return true; 1690fd83240aSStephen Rothwell } 16914eaf99beSMartin K. Petersen static inline bool blk_integrity_merge_bio(struct request_queue *rq, 1692fd83240aSStephen Rothwell struct request *r, 1693fd83240aSStephen Rothwell struct bio *b) 1694fd83240aSStephen Rothwell { 1695cb1a5ab6SMartin K. Petersen return true; 1696fd83240aSStephen Rothwell } 169725520d55SMartin K. Petersen 1698359f6427SGreg Edwards static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, 1699359f6427SGreg Edwards unsigned int sectors) 1700359f6427SGreg Edwards { 1701359f6427SGreg Edwards return 0; 1702359f6427SGreg Edwards } 1703359f6427SGreg Edwards 1704359f6427SGreg Edwards static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, 1705359f6427SGreg Edwards unsigned int sectors) 1706359f6427SGreg Edwards { 1707359f6427SGreg Edwards return 0; 1708359f6427SGreg Edwards } 1709359f6427SGreg Edwards 17102a876f5eSChristoph Hellwig static inline struct bio_vec *rq_integrity_vec(struct request *rq) 17112a876f5eSChristoph Hellwig { 17122a876f5eSChristoph Hellwig return NULL; 17132a876f5eSChristoph Hellwig } 17142a876f5eSChristoph Hellwig 17157ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */ 17167ba1ba12SMartin K. 
Petersen 171708f85851SAl Viro struct block_device_operations { 1718d4430d62SAl Viro int (*open) (struct block_device *, fmode_t); 1719db2a144bSAl Viro void (*release) (struct gendisk *, fmode_t); 17203f289dcbSTejun Heo int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); 1721d4430d62SAl Viro int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1722d4430d62SAl Viro int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 172377ea887eSTejun Heo unsigned int (*check_events) (struct gendisk *disk, 172477ea887eSTejun Heo unsigned int clearing); 172577ea887eSTejun Heo /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 172608f85851SAl Viro int (*media_changed) (struct gendisk *); 1727c3e33e04STejun Heo void (*unlock_native_capacity) (struct gendisk *); 172808f85851SAl Viro int (*revalidate_disk) (struct gendisk *); 172908f85851SAl Viro int (*getgeo)(struct block_device *, struct hd_geometry *); 1730b3a27d05SNitin Gupta /* this callback is with swap_lock and sometimes page table lock held */ 1731b3a27d05SNitin Gupta void (*swap_slot_free_notify) (struct block_device *, unsigned long); 1732e76239a3SChristoph Hellwig int (*report_zones)(struct gendisk *, sector_t sector, 1733d4100351SChristoph Hellwig unsigned int nr_zones, report_zones_cb cb, void *data); 1734348e114bSChristoph Hellwig char *(*devnode)(struct gendisk *disk, umode_t *mode); 173508f85851SAl Viro struct module *owner; 1736bbd3e064SChristoph Hellwig const struct pr_ops *pr_ops; 173708f85851SAl Viro }; 173808f85851SAl Viro 1739ee6a129dSArnd Bergmann #ifdef CONFIG_COMPAT 1740ee6a129dSArnd Bergmann extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t, 1741ee6a129dSArnd Bergmann unsigned int, unsigned long); 1742ee6a129dSArnd Bergmann #else 1743ee6a129dSArnd Bergmann #define blkdev_compat_ptr_ioctl NULL 1744ee6a129dSArnd Bergmann #endif 1745ee6a129dSArnd Bergmann 1746633a08b8SAl Viro extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, 1747633a08b8SAl Viro unsigned long); 174847a191fdSMatthew Wilcox extern int bdev_read_page(struct block_device *, sector_t, struct page *); 174947a191fdSMatthew Wilcox extern int bdev_write_page(struct block_device *, sector_t, struct page *, 175047a191fdSMatthew Wilcox struct writeback_control *); 17516cc77e9cSChristoph Hellwig 17526cc77e9cSChristoph Hellwig #ifdef CONFIG_BLK_DEV_ZONED 17536cc77e9cSChristoph Hellwig bool blk_req_needs_zone_write_lock(struct request *rq); 17541392d370SJohannes Thumshirn bool blk_req_zone_write_trylock(struct request *rq); 17556cc77e9cSChristoph Hellwig void __blk_req_zone_write_lock(struct request *rq); 17566cc77e9cSChristoph Hellwig void __blk_req_zone_write_unlock(struct request *rq); 17576cc77e9cSChristoph Hellwig 17586cc77e9cSChristoph Hellwig static inline void blk_req_zone_write_lock(struct request *rq) 17596cc77e9cSChristoph Hellwig { 17606cc77e9cSChristoph Hellwig if (blk_req_needs_zone_write_lock(rq)) 17616cc77e9cSChristoph Hellwig __blk_req_zone_write_lock(rq); 17626cc77e9cSChristoph Hellwig } 17636cc77e9cSChristoph Hellwig 17646cc77e9cSChristoph Hellwig static inline void blk_req_zone_write_unlock(struct request *rq) 17656cc77e9cSChristoph Hellwig { 17666cc77e9cSChristoph Hellwig if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) 17676cc77e9cSChristoph Hellwig __blk_req_zone_write_unlock(rq); 17686cc77e9cSChristoph Hellwig } 17696cc77e9cSChristoph Hellwig 17706cc77e9cSChristoph Hellwig static inline bool blk_req_zone_is_write_locked(struct request *rq) 
17716cc77e9cSChristoph Hellwig { 17726cc77e9cSChristoph Hellwig return rq->q->seq_zones_wlock && 17736cc77e9cSChristoph Hellwig test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); 17746cc77e9cSChristoph Hellwig } 17756cc77e9cSChristoph Hellwig 17766cc77e9cSChristoph Hellwig static inline bool blk_req_can_dispatch_to_zone(struct request *rq) 17776cc77e9cSChristoph Hellwig { 17786cc77e9cSChristoph Hellwig if (!blk_req_needs_zone_write_lock(rq)) 17796cc77e9cSChristoph Hellwig return true; 17806cc77e9cSChristoph Hellwig return !blk_req_zone_is_write_locked(rq); 17816cc77e9cSChristoph Hellwig } 17826cc77e9cSChristoph Hellwig #else 17836cc77e9cSChristoph Hellwig static inline bool blk_req_needs_zone_write_lock(struct request *rq) 17846cc77e9cSChristoph Hellwig { 17856cc77e9cSChristoph Hellwig return false; 17866cc77e9cSChristoph Hellwig } 17876cc77e9cSChristoph Hellwig 17886cc77e9cSChristoph Hellwig static inline void blk_req_zone_write_lock(struct request *rq) 17896cc77e9cSChristoph Hellwig { 17906cc77e9cSChristoph Hellwig } 17916cc77e9cSChristoph Hellwig 17926cc77e9cSChristoph Hellwig static inline void blk_req_zone_write_unlock(struct request *rq) 17936cc77e9cSChristoph Hellwig { 17946cc77e9cSChristoph Hellwig } 17956cc77e9cSChristoph Hellwig static inline bool blk_req_zone_is_write_locked(struct request *rq) 17966cc77e9cSChristoph Hellwig { 17976cc77e9cSChristoph Hellwig return false; 17986cc77e9cSChristoph Hellwig } 17996cc77e9cSChristoph Hellwig 18006cc77e9cSChristoph Hellwig static inline bool blk_req_can_dispatch_to_zone(struct request *rq) 18016cc77e9cSChristoph Hellwig { 18026cc77e9cSChristoph Hellwig return true; 18036cc77e9cSChristoph Hellwig } 18046cc77e9cSChristoph Hellwig #endif /* CONFIG_BLK_DEV_ZONED */ 18056cc77e9cSChristoph Hellwig 18069361401eSDavid Howells #else /* CONFIG_BLOCK */ 1807ac13a829SFabian Frederick 1808ac13a829SFabian Frederick struct block_device; 1809ac13a829SFabian Frederick 18109361401eSDavid Howells /* 18119361401eSDavid Howells * stubs for when the block layer is configured out 18129361401eSDavid Howells */ 18139361401eSDavid Howells #define buffer_heads_over_limit 0 18149361401eSDavid Howells 18159361401eSDavid Howells static inline long nr_blockdev_pages(void) 18169361401eSDavid Howells { 18179361401eSDavid Howells return 0; 18189361401eSDavid Howells } 18199361401eSDavid Howells 18201f940bdfSJens Axboe struct blk_plug { 18211f940bdfSJens Axboe }; 18221f940bdfSJens Axboe 18231f940bdfSJens Axboe static inline void blk_start_plug(struct blk_plug *plug) 182473c10101SJens Axboe { 182573c10101SJens Axboe } 182673c10101SJens Axboe 18271f940bdfSJens Axboe static inline void blk_finish_plug(struct blk_plug *plug) 182873c10101SJens Axboe { 182973c10101SJens Axboe } 183073c10101SJens Axboe 18311f940bdfSJens Axboe static inline void blk_flush_plug(struct task_struct *task) 183273c10101SJens Axboe { 183373c10101SJens Axboe } 183473c10101SJens Axboe 1835a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *task) 1836a237c1c5SJens Axboe { 1837a237c1c5SJens Axboe } 1838a237c1c5SJens Axboe 1839a237c1c5SJens Axboe 184073c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk) 184173c10101SJens Axboe { 184273c10101SJens Axboe return false; 184373c10101SJens Axboe } 184473c10101SJens Axboe 1845ac13a829SFabian Frederick static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, 1846ac13a829SFabian Frederick sector_t *error_sector) 1847ac13a829SFabian Frederick { 1848ac13a829SFabian Frederick return 
0; 1849ac13a829SFabian Frederick } 1850ac13a829SFabian Frederick 18519361401eSDavid Howells #endif /* CONFIG_BLOCK */ 18529361401eSDavid Howells 18530619317fSJens Axboe static inline void blk_wake_io_task(struct task_struct *waiter) 18540619317fSJens Axboe { 18550619317fSJens Axboe /* 18560619317fSJens Axboe * If we're polling, the task itself is doing the completions. For 18570619317fSJens Axboe * that case, we don't need to signal a wakeup, it's enough to just 18580619317fSJens Axboe * mark us as RUNNING. 18590619317fSJens Axboe */ 18600619317fSJens Axboe if (waiter == current) 18610619317fSJens Axboe __set_current_state(TASK_RUNNING); 18620619317fSJens Axboe else 18630619317fSJens Axboe wake_up_process(waiter); 18640619317fSJens Axboe } 18650619317fSJens Axboe 18661da177e4SLinus Torvalds #endif 1867