#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
};

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

#define BLK_MAX_CDB	16
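/*
 * Example (illustrative only, not part of this header): a minimal sketch of
 * how a driver might test one of the RQF_* bits above.  The helper name
 * "example_rq_needs_prep" is hypothetical; it assumes "rq" points to a
 * request whose rq_flags field is maintained by the block layer.
 */
#if 0
static bool example_rq_needs_prep(struct request *rq)
{
	/* RQF_DONTPREP is set once ->prep_rq_fn has handled this request */
	return !(rq->rq_flags & RQF_DONTPREP);
}
#endif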
/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list.  The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
	struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
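/*
 * Example (illustrative only): a minimal end_io callback matching the
 * rq_end_io_fn typedef above.  The name "my_end_io" and the use of
 * end_io_data to carry a completion are a sketch of a common driver
 * pattern, not an API defined by this header.
 */
#if 0
static void my_end_io(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	/* the submitter still owns rq and is responsible for freeing it */
	complete(waiting);
}
#endif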
#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};
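/*
 * Example (illustrative only): queue_limits values are in 512-byte sectors
 * unless the field name says otherwise.  A hypothetical helper converting
 * the soft I/O size cap to bytes might look like this; "limits" is assumed
 * to come from a live queue.
 */
#if 0
static unsigned int example_max_io_bytes(const struct queue_limits *limits)
{
	return limits->max_sectors << 9;	/* sectors -> bytes */
}
#endif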
#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int	nr_zones;
	u8		padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
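/*
 * Example (illustrative only): a sketch of reporting the first few zones of
 * a zoned block device with blkdev_report_zones() as declared above.  Error
 * handling is abbreviated and the caller is assumed to hold a reference on
 * the block_device.
 */
#if 0
static int example_report_zones(struct block_device *bdev)
{
	struct blk_zone zones[8];
	unsigned int nr_zones = ARRAY_SIZE(zones);
	int ret;

	/* on success nr_zones is updated to the number actually reported */
	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
	if (ret)
		return ret;
	return nr_zones;
}
#endif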
struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	struct rq_wb		*rq_wb;

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	const struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	struct blk_rq_stat	rq_stats[2];

	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait.  Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	int			poll_nsec;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device	bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};
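/*
 * Example (illustrative only): ->queuedata is the conventional hook for a
 * driver to find its own device structure from a queue, e.g. inside its
 * request_fn.  "struct my_dev" and "my_request_fn" are hypothetical names.
 */
#if 0
static void my_request_fn(struct request_queue *q)
{
	struct my_dev *dev = q->queuedata;	/* set by the driver at init */

	/* ... fetch and service requests for dev ... */
}
#endif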
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_STATS	27	/* track rq completion times */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}
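/*
 * Example (illustrative only): the queue_flag_* helpers use non-atomic
 * __set_bit/__clear_bit, so the locked variants must run under
 * ->queue_lock; queue_lockdep_assert_held() above enforces that.  A
 * hypothetical sketch:
 */
#if 0
static void example_mark_nonrot(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NONROT, q);	/* e.g. an SSD */
	spin_unlock_irq(q->queue_lock);
}
#endif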
static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->rq_flags & RQF_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
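/*
 * Example (illustrative only): combining the helpers above to compute the
 * start of the zone containing a given sector.  Assumes power-of-2 zone
 * sizes, which is what chunk_sectors-based zoned devices report; the helper
 * name is hypothetical.
 */
#if 0
static sector_t example_zone_start(struct request_queue *q, sector_t sector)
{
	unsigned int zone_size = blk_queue_zone_size(q);	/* in sectors */

	if (!zone_size)
		return 0;	/* not a zoned device */
	return sector & ~((sector_t)zone_size - 1);
}
#endif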
static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
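/*
 * Example (illustrative only): walking every segment of a request with
 * rq_for_each_segment().  The helper is a sketch; bvec.bv_page, bv_offset
 * and bv_len describe each segment, and rq_iter_last() flags the final one.
 */
#if 0
static unsigned int example_count_segments(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int nr = 0;

	rq_for_each_segment(bvec, rq, iter)
		nr++;	/* one iteration per bio_vec segment */

	return nr;
}
#endif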
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern void blk_mq_quiesce_queue(struct request_queue *q);
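/*
 * Example (illustrative only): the classic pattern for allocating and
 * synchronously executing a packet-command (BLOCK_PC) request with the
 * functions declared above and blk_execute_rq() declared further below.
 * Command setup is elided; error handling is abbreviated.
 */
#if 0
static int example_exec_pc(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);
	/* ... fill rq->cmd[], rq->cmd_len, timeout, data ... */
	err = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return err;
}
#endif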
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * blk_rq_set_prio - associate a request with prio from ioc
 * @rq: request of interest
 * @ioc: target iocontext
 *
 * Associate request prio with ioc prio so request based drivers
 * can leverage priority information.
 */
static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
{
	if (ioc)
		rq->ioprio = ioc->ioprio;
}
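/*
 * Worked example (illustrative only): with chunk_sectors = 256 (a 128 KiB
 * chunk), a file system request starting at sector 300 may span at most
 * 256 - (300 & 255) = 256 - 44 = 212 sectors before it would cross a chunk
 * boundary; blk_max_size_offset() above returns exactly that, and
 * blk_rq_get_max_sectors() further clamps it to the per-op queue limits.
 */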
/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);
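/*
 * Example (illustrative only): a single-queue driver completing a request
 * it previously obtained with blk_fetch_request().  blk_end_request()
 * returns true while bytes remain pending, so a partial completion simply
 * leaves the request active; the helper name is hypothetical.
 */
#if 0
static void example_complete(struct request *rq, unsigned int done, int error)
{
	if (!blk_end_request(rq, error, done))
		return;		/* fully completed and released */
	/* partial completion: rq is still active with the remainder */
}
#endif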
Petersen extern void blk_queue_max_segments(struct request_queue *, unsigned short); 1127165125e1SJens Axboe extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 112867efc925SChristoph Hellwig extern void blk_queue_max_discard_sectors(struct request_queue *q, 112967efc925SChristoph Hellwig unsigned int max_discard_sectors); 11304363ac7cSMartin K. Petersen extern void blk_queue_max_write_same_sectors(struct request_queue *q, 11314363ac7cSMartin K. Petersen unsigned int max_write_same_sectors); 1132a6f0788eSChaitanya Kulkarni extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, 1133a6f0788eSChaitanya Kulkarni unsigned int max_write_zeroes_sectors); 1134e1defc4fSMartin K. Petersen extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 1135892b6f90SMartin K. Petersen extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); 1136c72758f3SMartin K. Petersen extern void blk_queue_alignment_offset(struct request_queue *q, 1137c72758f3SMartin K. Petersen unsigned int alignment); 11387c958e32SMartin K. Petersen extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); 1139c72758f3SMartin K. Petersen extern void blk_queue_io_min(struct request_queue *q, unsigned int min); 11403c5820c7SMartin K. Petersen extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); 1141c72758f3SMartin K. Petersen extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); 1142d278d4a8SJens Axboe extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); 1143e475bba2SMartin K. Petersen extern void blk_set_default_limits(struct queue_limits *lim); 1144b1bd055dSMartin K. Petersen extern void blk_set_stacking_limits(struct queue_limits *lim); 1145c72758f3SMartin K. Petersen extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 1146c72758f3SMartin K. Petersen sector_t offset); 114717be8c24SMartin K. Petersen extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, 114817be8c24SMartin K. Petersen sector_t offset); 1149c72758f3SMartin K. Petersen extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 1150c72758f3SMartin K.
Petersen sector_t offset); 1151165125e1SJens Axboe extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 1152e3790c7dSTejun Heo extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 115327f8221aSFUJITA Tomonori extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); 11542fb98e84STejun Heo extern int blk_queue_dma_drain(struct request_queue *q, 11552fb98e84STejun Heo dma_drain_needed_fn *dma_drain_needed, 11562fb98e84STejun Heo void *buf, unsigned int size); 1157ef9e3facSKiyoshi Ueda extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 1158165125e1SJens Axboe extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 115903100aadSKeith Busch extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); 1160165125e1SJens Axboe extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 116128018c24SJames Bottomley extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); 1162165125e1SJens Axboe extern void blk_queue_dma_alignment(struct request_queue *, int); 116311c3e689SJames Bottomley extern void blk_queue_update_dma_alignment(struct request_queue *, int); 1164165125e1SJens Axboe extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 1165242f9dcbSJens Axboe extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 1166242f9dcbSJens Axboe extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 1167f3876930Sshaohua.li@intel.com extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); 116893e9d8e8SJens Axboe extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); 11691da177e4SLinus Torvalds extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 11701da177e4SLinus Torvalds 1171f9d03f96SChristoph Hellwig static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) 1172f9d03f96SChristoph Hellwig { 1173f9d03f96SChristoph Hellwig if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1174f9d03f96SChristoph Hellwig return 1; 1175f9d03f96SChristoph Hellwig return rq->nr_phys_segments; 1176f9d03f96SChristoph Hellwig } 1177f9d03f96SChristoph Hellwig 1178165125e1SJens Axboe extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 11791da177e4SLinus Torvalds extern void blk_dump_rq_flags(struct request *, char *); 11801da177e4SLinus Torvalds extern long nr_blockdev_pages(void); 11811da177e4SLinus Torvalds 118209ac46c4STejun Heo bool __must_check blk_get_queue(struct request_queue *); 1183165125e1SJens Axboe struct request_queue *blk_alloc_queue(gfp_t); 1184165125e1SJens Axboe struct request_queue *blk_alloc_queue_node(gfp_t, int); 1185165125e1SJens Axboe extern void blk_put_queue(struct request_queue *); 11863f21c265SJens Axboe extern void blk_set_queue_dying(struct request_queue *); 11871da177e4SLinus Torvalds 1188316cc67dSShaohua Li /* 11896c954667SLin Ming * block layer runtime pm functions 11906c954667SLin Ming */ 119147fafbc7SRafael J.
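/*
 * A minimal usage sketch (assumed driver code, not from this header):
 * a request-based driver calls blk_pm_runtime_init() once at probe time
 * and brackets its runtime_suspend callback with the pre/post helpers
 * declared below, so suspend is refused while requests are in flight:
 *
 *	static int demo_runtime_suspend(struct device *dev)
 *	{
 *		struct request_queue *q = demo_dev_to_queue(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;
 *		err = demo_hw_suspend(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}
 *
 * demo_dev_to_queue() and demo_hw_suspend() are hypothetical helpers.
 */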
Wysocki #ifdef CONFIG_PM 11926c954667SLin Ming extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); 11936c954667SLin Ming extern int blk_pre_runtime_suspend(struct request_queue *q); 11946c954667SLin Ming extern void blk_post_runtime_suspend(struct request_queue *q, int err); 11956c954667SLin Ming extern void blk_pre_runtime_resume(struct request_queue *q); 11966c954667SLin Ming extern void blk_post_runtime_resume(struct request_queue *q, int err); 1197d07ab6d1SMika Westerberg extern void blk_set_runtime_active(struct request_queue *q); 11986c954667SLin Ming #else 11996c954667SLin Ming static inline void blk_pm_runtime_init(struct request_queue *q, 12006c954667SLin Ming struct device *dev) {} 12016c954667SLin Ming static inline int blk_pre_runtime_suspend(struct request_queue *q) 12026c954667SLin Ming { 12036c954667SLin Ming return -ENOSYS; 12046c954667SLin Ming } 12056c954667SLin Ming static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} 12066c954667SLin Ming static inline void blk_pre_runtime_resume(struct request_queue *q) {} 12076c954667SLin Ming static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} 12089a05e754STobias Klauser static inline void blk_set_runtime_active(struct request_queue *q) {} 12096c954667SLin Ming #endif 12106c954667SLin Ming 12116c954667SLin Ming /* 121275df7136SSuresh Jayaraman * blk_plug permits building a queue of related requests by holding the I/O 121375df7136SSuresh Jayaraman * fragments for a short period. This allows merging of sequential requests 121475df7136SSuresh Jayaraman * into a single larger request. As the requests are moved from a per-task list to 121575df7136SSuresh Jayaraman * the device's request_queue in a batch, this results in improved scalability 121675df7136SSuresh Jayaraman * as contention on the request_queue lock is reduced. 121775df7136SSuresh Jayaraman * 121875df7136SSuresh Jayaraman * It is ok not to disable preemption when adding the request to the plug list 121975df7136SSuresh Jayaraman * or when attempting a merge, because blk_schedule_flush_plug() will only flush 122075df7136SSuresh Jayaraman * the plug list when the task sleeps by itself. For details, please see 122175df7136SSuresh Jayaraman * schedule() where blk_schedule_flush_plug() is called.
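 *
 * A usage sketch (illustrative, not part of the original comment):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_batch_of_bios();		(hypothetical helper)
 *	blk_finish_plug(&plug);
 *
 * I/O submitted between the two calls is held on the per-task plug
 * lists and handed to the driver in one batch by blk_finish_plug().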
1222316cc67dSShaohua Li */ 122373c10101SJens Axboe struct blk_plug { 122475df7136SSuresh Jayaraman struct list_head list; /* requests */ 1225320ae51fSJens Axboe struct list_head mq_list; /* blk-mq requests */ 122675df7136SSuresh Jayaraman struct list_head cb_list; /* md requires an unplug callback */ 122773c10101SJens Axboe }; 122855c022bbSShaohua Li #define BLK_MAX_REQUEST_COUNT 16 122950d24c34SShaohua Li #define BLK_PLUG_FLUSH_SIZE (128 * 1024) 123055c022bbSShaohua Li 12319cbb1750SNeilBrown struct blk_plug_cb; 123274018dc3SNeilBrown typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); 1233048c9374SNeilBrown struct blk_plug_cb { 1234048c9374SNeilBrown struct list_head list; 12359cbb1750SNeilBrown blk_plug_cb_fn callback; 12369cbb1750SNeilBrown void *data; 1237048c9374SNeilBrown }; 12389cbb1750SNeilBrown extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, 12399cbb1750SNeilBrown void *data, int size); 124073c10101SJens Axboe extern void blk_start_plug(struct blk_plug *); 124173c10101SJens Axboe extern void blk_finish_plug(struct blk_plug *); 1242f6603783SJens Axboe extern void blk_flush_plug_list(struct blk_plug *, bool); 124373c10101SJens Axboe 124473c10101SJens Axboe static inline void blk_flush_plug(struct task_struct *tsk) 124573c10101SJens Axboe { 124673c10101SJens Axboe struct blk_plug *plug = tsk->plug; 124773c10101SJens Axboe 124888b996cdSChristoph Hellwig if (plug) 1249a237c1c5SJens Axboe blk_flush_plug_list(plug, false); 1250a237c1c5SJens Axboe } 1251a237c1c5SJens Axboe 1252a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *tsk) 1253a237c1c5SJens Axboe { 1254a237c1c5SJens Axboe struct blk_plug *plug = tsk->plug; 1255a237c1c5SJens Axboe 1256a237c1c5SJens Axboe if (plug) 1257f6603783SJens Axboe blk_flush_plug_list(plug, true); 125873c10101SJens Axboe } 125973c10101SJens Axboe 126073c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk) 126173c10101SJens Axboe { 126273c10101SJens Axboe struct blk_plug *plug = tsk->plug; 126373c10101SJens Axboe 1264320ae51fSJens Axboe return plug && 1265320ae51fSJens Axboe (!list_empty(&plug->list) || 1266320ae51fSJens Axboe !list_empty(&plug->mq_list) || 1267320ae51fSJens Axboe !list_empty(&plug->cb_list)); 126873c10101SJens Axboe } 126973c10101SJens Axboe 12701da177e4SLinus Torvalds /* 12711da177e4SLinus Torvalds * tag stuff 12721da177e4SLinus Torvalds */ 1273165125e1SJens Axboe extern int blk_queue_start_tag(struct request_queue *, struct request *); 1274165125e1SJens Axboe extern struct request *blk_queue_find_tag(struct request_queue *, int); 1275165125e1SJens Axboe extern void blk_queue_end_tag(struct request_queue *, struct request *); 1276ee1b6f7aSShaohua Li extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); 1277165125e1SJens Axboe extern void blk_queue_free_tags(struct request_queue *); 1278165125e1SJens Axboe extern int blk_queue_resize_tags(struct request_queue *, int); 1279165125e1SJens Axboe extern void blk_queue_invalidate_tags(struct request_queue *); 1280ee1b6f7aSShaohua Li extern struct blk_queue_tag *blk_init_tags(int, int); 1281492dfb48SJames Bottomley extern void blk_free_tags(struct blk_queue_tag *); 12821da177e4SLinus Torvalds 1283f583f492SDavid C Somayajulu static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, 1284f583f492SDavid C Somayajulu int tag) 1285f583f492SDavid C Somayajulu { 1286f583f492SDavid C Somayajulu if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) 
1287f583f492SDavid C Somayajulu return NULL; 1288f583f492SDavid C Somayajulu return bqt->tag_index[tag]; 1289f583f492SDavid C Somayajulu } 1290dd3932edSChristoph Hellwig 1291e950fdf7SChristoph Hellwig 1292e950fdf7SChristoph Hellwig #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ 1293e950fdf7SChristoph Hellwig #define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ 1294dd3932edSChristoph Hellwig 1295dd3932edSChristoph Hellwig extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); 1296fbd9b09aSDmitry Monakhov extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1297fbd9b09aSDmitry Monakhov sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 129838f25255SChristoph Hellwig extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1299288dab8aSChristoph Hellwig sector_t nr_sects, gfp_t gfp_mask, int flags, 1300469e3216SMike Christie struct bio **biop); 13014363ac7cSMartin K. Petersen extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, 13024363ac7cSMartin K. Petersen sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1303e73c23ffSChaitanya Kulkarni extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1304e73c23ffSChaitanya Kulkarni sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, 1305e73c23ffSChaitanya Kulkarni bool discard); 13063f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1307d93ba7a5SMartin K. Petersen sector_t nr_sects, gfp_t gfp_mask, bool discard); 13082cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block, 13092cf6d26aSChristoph Hellwig sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) 1310fb2dce86SDavid Woodhouse { 13112cf6d26aSChristoph Hellwig return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), 13122cf6d26aSChristoph Hellwig nr_blocks << (sb->s_blocksize_bits - 9), 13132cf6d26aSChristoph Hellwig gfp_mask, flags); 1314fb2dce86SDavid Woodhouse } 1315e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, 1316a107e5a3STheodore Ts'o sector_t nr_blocks, gfp_t gfp_mask) 1317e6fa0be6SLukas Czerner { 1318e6fa0be6SLukas Czerner return blkdev_issue_zeroout(sb->s_bdev, 1319e6fa0be6SLukas Czerner block << (sb->s_blocksize_bits - 9), 1320e6fa0be6SLukas Czerner nr_blocks << (sb->s_blocksize_bits - 9), 1321d93ba7a5SMartin K. Petersen gfp_mask, true); 1322e6fa0be6SLukas Czerner } 13231da177e4SLinus Torvalds 1324018e0446SJens Axboe extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 13250b07de85SAdel Gadllah 1326eb28d31bSMartin K. Petersen enum blk_default_limits { 1327eb28d31bSMartin K. Petersen BLK_MAX_SEGMENTS = 128, 1328eb28d31bSMartin K. Petersen BLK_SAFE_MAX_SECTORS = 255, 1329d2be537cSJeff Moyer BLK_DEF_MAX_SECTORS = 2560, 1330eb28d31bSMartin K. Petersen BLK_MAX_SEGMENT_SIZE = 65536, 1331eb28d31bSMartin K. Petersen BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1332eb28d31bSMartin K. Petersen }; 13330e435ac2SMilan Broz 13341da177e4SLinus Torvalds #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 13351da177e4SLinus Torvalds 1336ae03bf63SMartin K. Petersen static inline unsigned long queue_bounce_pfn(struct request_queue *q) 1337ae03bf63SMartin K. Petersen { 1338025146e1SMartin K. Petersen return q->limits.bounce_pfn; 1339ae03bf63SMartin K. Petersen } 1340ae03bf63SMartin K. Petersen 1341ae03bf63SMartin K. 
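/*
 * Worked example (not part of the original header) for the shift
 * arithmetic in sb_issue_discard() and sb_issue_zeroout() above:
 * filesystem blocks are converted to 512-byte sectors by shifting with
 * (s_blocksize_bits - 9). For a 4 KiB block size, s_blocksize_bits is
 * 12, so block 10 maps to sector 10 << (12 - 9) = 80, and nr_blocks is
 * scaled the same way.
 */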
Petersen static inline unsigned long queue_segment_boundary(struct request_queue *q) 1342ae03bf63SMartin K. Petersen { 1343025146e1SMartin K. Petersen return q->limits.seg_boundary_mask; 1344ae03bf63SMartin K. Petersen } 1345ae03bf63SMartin K. Petersen 134603100aadSKeith Busch static inline unsigned long queue_virt_boundary(struct request_queue *q) 134703100aadSKeith Busch { 134803100aadSKeith Busch return q->limits.virt_boundary_mask; 134903100aadSKeith Busch } 135003100aadSKeith Busch 1351ae03bf63SMartin K. Petersen static inline unsigned int queue_max_sectors(struct request_queue *q) 1352ae03bf63SMartin K. Petersen { 1353025146e1SMartin K. Petersen return q->limits.max_sectors; 1354ae03bf63SMartin K. Petersen } 1355ae03bf63SMartin K. Petersen 1356ae03bf63SMartin K. Petersen static inline unsigned int queue_max_hw_sectors(struct request_queue *q) 1357ae03bf63SMartin K. Petersen { 1358025146e1SMartin K. Petersen return q->limits.max_hw_sectors; 1359ae03bf63SMartin K. Petersen } 1360ae03bf63SMartin K. Petersen 13618a78362cSMartin K. Petersen static inline unsigned short queue_max_segments(struct request_queue *q) 1362ae03bf63SMartin K. Petersen { 13638a78362cSMartin K. Petersen return q->limits.max_segments; 1364ae03bf63SMartin K. Petersen } 1365ae03bf63SMartin K. Petersen 1366ae03bf63SMartin K. Petersen static inline unsigned int queue_max_segment_size(struct request_queue *q) 1367ae03bf63SMartin K. Petersen { 1368025146e1SMartin K. Petersen return q->limits.max_segment_size; 1369ae03bf63SMartin K. Petersen } 1370ae03bf63SMartin K. Petersen 1371e1defc4fSMartin K. Petersen static inline unsigned short queue_logical_block_size(struct request_queue *q) 13721da177e4SLinus Torvalds { 13731da177e4SLinus Torvalds int retval = 512; 13741da177e4SLinus Torvalds 1375025146e1SMartin K. Petersen if (q && q->limits.logical_block_size) 1376025146e1SMartin K. Petersen retval = q->limits.logical_block_size; 13771da177e4SLinus Torvalds 13781da177e4SLinus Torvalds return retval; 13791da177e4SLinus Torvalds } 13801da177e4SLinus Torvalds 1381e1defc4fSMartin K. Petersen static inline unsigned short bdev_logical_block_size(struct block_device *bdev) 13821da177e4SLinus Torvalds { 1383e1defc4fSMartin K. Petersen return queue_logical_block_size(bdev_get_queue(bdev)); 13841da177e4SLinus Torvalds } 13851da177e4SLinus Torvalds 1386c72758f3SMartin K. Petersen static inline unsigned int queue_physical_block_size(struct request_queue *q) 1387c72758f3SMartin K. Petersen { 1388c72758f3SMartin K. Petersen return q->limits.physical_block_size; 1389c72758f3SMartin K. Petersen } 1390c72758f3SMartin K. Petersen 1391892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev) 1392ac481c20SMartin K. Petersen { 1393ac481c20SMartin K. Petersen return queue_physical_block_size(bdev_get_queue(bdev)); 1394ac481c20SMartin K. Petersen } 1395ac481c20SMartin K. Petersen 1396c72758f3SMartin K. Petersen static inline unsigned int queue_io_min(struct request_queue *q) 1397c72758f3SMartin K. Petersen { 1398c72758f3SMartin K. Petersen return q->limits.io_min; 1399c72758f3SMartin K. Petersen } 1400c72758f3SMartin K. Petersen 1401ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev) 1402ac481c20SMartin K. Petersen { 1403ac481c20SMartin K. Petersen return queue_io_min(bdev_get_queue(bdev)); 1404ac481c20SMartin K. Petersen } 1405ac481c20SMartin K. Petersen 1406c72758f3SMartin K. 
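/*
 * Illustration (assumed device, not from this header): a RAID5 array
 * with a 64 KiB chunk size across four data disks would typically
 * report io_min = 65536 (one chunk) and io_opt = 262144 (a full
 * stripe), letting filesystems align and size their I/O via
 * queue_io_min() and queue_io_opt().
 */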
Petersen static inline unsigned int queue_io_opt(struct request_queue *q) 1407c72758f3SMartin K. Petersen { 1408c72758f3SMartin K. Petersen return q->limits.io_opt; 1409c72758f3SMartin K. Petersen } 1410c72758f3SMartin K. Petersen 1411ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev) 1412ac481c20SMartin K. Petersen { 1413ac481c20SMartin K. Petersen return queue_io_opt(bdev_get_queue(bdev)); 1414ac481c20SMartin K. Petersen } 1415ac481c20SMartin K. Petersen 1416c72758f3SMartin K. Petersen static inline int queue_alignment_offset(struct request_queue *q) 1417c72758f3SMartin K. Petersen { 1418ac481c20SMartin K. Petersen if (q->limits.misaligned) 1419c72758f3SMartin K. Petersen return -1; 1420c72758f3SMartin K. Petersen 1421c72758f3SMartin K. Petersen return q->limits.alignment_offset; 1422c72758f3SMartin K. Petersen } 1423c72758f3SMartin K. Petersen 1424e03a72e1SMartin K. Petersen static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) 142581744ee4SMartin K. Petersen { 142681744ee4SMartin K. Petersen unsigned int granularity = max(lim->physical_block_size, lim->io_min); 1427b8839b8cSMike Snitzer unsigned int alignment = sector_div(sector, granularity >> 9) << 9; 142881744ee4SMartin K. Petersen 1429b8839b8cSMike Snitzer return (granularity + lim->alignment_offset - alignment) % granularity; 1430c72758f3SMartin K. Petersen } 1431c72758f3SMartin K. Petersen 1432ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev) 1433ac481c20SMartin K. Petersen { 1434ac481c20SMartin K. Petersen struct request_queue *q = bdev_get_queue(bdev); 1435ac481c20SMartin K. Petersen 1436ac481c20SMartin K. Petersen if (q->limits.misaligned) 1437ac481c20SMartin K. Petersen return -1; 1438ac481c20SMartin K. Petersen 1439ac481c20SMartin K. Petersen if (bdev != bdev->bd_contains) 1440ac481c20SMartin K. Petersen return bdev->bd_part->alignment_offset; 1441ac481c20SMartin K. Petersen 1442ac481c20SMartin K. Petersen return q->limits.alignment_offset; 1443ac481c20SMartin K. Petersen } 1444ac481c20SMartin K. Petersen 144586b37281SMartin K. Petersen static inline int queue_discard_alignment(struct request_queue *q) 144686b37281SMartin K. Petersen { 144786b37281SMartin K. Petersen if (q->limits.discard_misaligned) 144886b37281SMartin K. Petersen return -1; 144986b37281SMartin K. Petersen 145086b37281SMartin K. Petersen return q->limits.discard_alignment; 145186b37281SMartin K. Petersen } 145286b37281SMartin K. Petersen 1453e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 145486b37281SMartin K. Petersen { 145559771079SLinus Torvalds unsigned int alignment, granularity, offset; 1456dd3d145dSMartin K. Petersen 1457a934a00aSMartin K. Petersen if (!lim->max_discard_sectors) 1458a934a00aSMartin K. Petersen return 0; 1459a934a00aSMartin K. Petersen 146059771079SLinus Torvalds /* Why are these in bytes, not sectors? */ 146159771079SLinus Torvalds alignment = lim->discard_alignment >> 9; 146259771079SLinus Torvalds granularity = lim->discard_granularity >> 9; 146359771079SLinus Torvalds if (!granularity) 146459771079SLinus Torvalds return 0; 146559771079SLinus Torvalds 146659771079SLinus Torvalds /* Offset of the partition start in 'granularity' sectors */ 146759771079SLinus Torvalds offset = sector_div(sector, granularity); 146859771079SLinus Torvalds 146959771079SLinus Torvalds /* And why do we do this modulus *again* in blkdev_issue_discard()? 
*/ 147059771079SLinus Torvalds offset = (granularity + alignment - offset) % granularity; 147159771079SLinus Torvalds 147259771079SLinus Torvalds /* Turn it back into bytes, gaah */ 147359771079SLinus Torvalds return offset << 9; 147486b37281SMartin K. Petersen } 147586b37281SMartin K. Petersen 1476c6e66634SPaolo Bonzini static inline int bdev_discard_alignment(struct block_device *bdev) 1477c6e66634SPaolo Bonzini { 1478c6e66634SPaolo Bonzini struct request_queue *q = bdev_get_queue(bdev); 1479c6e66634SPaolo Bonzini 1480c6e66634SPaolo Bonzini if (bdev != bdev->bd_contains) 1481c6e66634SPaolo Bonzini return bdev->bd_part->discard_alignment; 1482c6e66634SPaolo Bonzini 1483c6e66634SPaolo Bonzini return q->limits.discard_alignment; 1484c6e66634SPaolo Bonzini } 1485c6e66634SPaolo Bonzini 148698262f27SMartin K. Petersen static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 148798262f27SMartin K. Petersen { 1488a934a00aSMartin K. Petersen if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) 148998262f27SMartin K. Petersen return 1; 149098262f27SMartin K. Petersen 149198262f27SMartin K. Petersen return 0; 149298262f27SMartin K. Petersen } 149398262f27SMartin K. Petersen 149498262f27SMartin K. Petersen static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) 149598262f27SMartin K. Petersen { 149698262f27SMartin K. Petersen return queue_discard_zeroes_data(bdev_get_queue(bdev)); 149798262f27SMartin K. Petersen } 149898262f27SMartin K. Petersen 14994363ac7cSMartin K. Petersen static inline unsigned int bdev_write_same(struct block_device *bdev) 15004363ac7cSMartin K. Petersen { 15014363ac7cSMartin K. Petersen struct request_queue *q = bdev_get_queue(bdev); 15024363ac7cSMartin K. Petersen 15034363ac7cSMartin K. Petersen if (q) 15044363ac7cSMartin K. Petersen return q->limits.max_write_same_sectors; 15054363ac7cSMartin K. Petersen 15064363ac7cSMartin K. Petersen return 0; 15074363ac7cSMartin K. Petersen } 15084363ac7cSMartin K. 
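/*
 * Worked example (assumed numbers, not from this header) for
 * queue_limit_discard_alignment() above: with a discard_granularity of
 * 64 KiB (128 sectors), discard_alignment = 0 and a partition starting
 * at sector 100, offset = 100 % 128 = 100, so the helper returns
 * ((128 + 0 - 100) % 128) << 9 = 28 sectors = 14336 bytes up to the
 * next granularity boundary.
 */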
Petersen 1509a6f0788eSChaitanya Kulkarni static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) 1510a6f0788eSChaitanya Kulkarni { 1511a6f0788eSChaitanya Kulkarni struct request_queue *q = bdev_get_queue(bdev); 1512a6f0788eSChaitanya Kulkarni 1513a6f0788eSChaitanya Kulkarni if (q) 1514a6f0788eSChaitanya Kulkarni return q->limits.max_write_zeroes_sectors; 1515a6f0788eSChaitanya Kulkarni 1516a6f0788eSChaitanya Kulkarni return 0; 1517a6f0788eSChaitanya Kulkarni } 1518a6f0788eSChaitanya Kulkarni 1519797476b8SDamien Le Moal static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) 1520797476b8SDamien Le Moal { 1521797476b8SDamien Le Moal struct request_queue *q = bdev_get_queue(bdev); 1522797476b8SDamien Le Moal 1523797476b8SDamien Le Moal if (q) 1524797476b8SDamien Le Moal return blk_queue_zoned_model(q); 1525797476b8SDamien Le Moal 1526797476b8SDamien Le Moal return BLK_ZONED_NONE; 1527797476b8SDamien Le Moal } 1528797476b8SDamien Le Moal 1529797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev) 1530797476b8SDamien Le Moal { 1531797476b8SDamien Le Moal struct request_queue *q = bdev_get_queue(bdev); 1532797476b8SDamien Le Moal 1533797476b8SDamien Le Moal if (q) 1534797476b8SDamien Le Moal return blk_queue_is_zoned(q); 1535797476b8SDamien Le Moal 1536797476b8SDamien Le Moal return false; 1537797476b8SDamien Le Moal } 1538797476b8SDamien Le Moal 15396a0cb1bcSHannes Reinecke static inline unsigned int bdev_zone_size(struct block_device *bdev) 15406a0cb1bcSHannes Reinecke { 15416a0cb1bcSHannes Reinecke struct request_queue *q = bdev_get_queue(bdev); 15426a0cb1bcSHannes Reinecke 15436a0cb1bcSHannes Reinecke if (q) 15446a0cb1bcSHannes Reinecke return blk_queue_zone_size(q); 15456a0cb1bcSHannes Reinecke 15466a0cb1bcSHannes Reinecke return 0; 15476a0cb1bcSHannes Reinecke } 15486a0cb1bcSHannes Reinecke 1549165125e1SJens Axboe static inline int queue_dma_alignment(struct request_queue *q) 15501da177e4SLinus Torvalds { 1551482eb689SPete Wyckoff return q ? 
q->dma_alignment : 511; 15521da177e4SLinus Torvalds } 15531da177e4SLinus Torvalds 155414417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, 155587904074SFUJITA Tomonori unsigned int len) 155687904074SFUJITA Tomonori { 155787904074SFUJITA Tomonori unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; 155814417799SNamhyung Kim return !(addr & alignment) && !(len & alignment); 155987904074SFUJITA Tomonori } 156087904074SFUJITA Tomonori 15611da177e4SLinus Torvalds /* assumes size > 256 */ 15621da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size) 15631da177e4SLinus Torvalds { 15641da177e4SLinus Torvalds unsigned int bits = 8; 15651da177e4SLinus Torvalds do { 15661da177e4SLinus Torvalds bits++; 15671da177e4SLinus Torvalds size >>= 1; 15681da177e4SLinus Torvalds } while (size > 256); 15691da177e4SLinus Torvalds return bits; 15701da177e4SLinus Torvalds } 15711da177e4SLinus Torvalds 15722befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev) 15731da177e4SLinus Torvalds { 15741da177e4SLinus Torvalds return bdev->bd_block_size; 15751da177e4SLinus Torvalds } 15761da177e4SLinus Torvalds 1577f3876930Sshaohua.li@intel.com static inline bool queue_flush_queueable(struct request_queue *q) 1578f3876930Sshaohua.li@intel.com { 1579c888a8f9SJens Axboe return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); 1580f3876930Sshaohua.li@intel.com } 1581f3876930Sshaohua.li@intel.com 15821da177e4SLinus Torvalds typedef struct {struct page *v;} Sector; 15831da177e4SLinus Torvalds 15841da177e4SLinus Torvalds unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 15851da177e4SLinus Torvalds 15861da177e4SLinus Torvalds static inline void put_dev_sector(Sector p) 15871da177e4SLinus Torvalds { 158809cbfeafSKirill A. Shutemov put_page(p.v); 15891da177e4SLinus Torvalds } 15901da177e4SLinus Torvalds 1591e0af2917SMing Lei static inline bool __bvec_gap_to_prev(struct request_queue *q, 1592e0af2917SMing Lei struct bio_vec *bprv, unsigned int offset) 1593e0af2917SMing Lei { 1594e0af2917SMing Lei return offset || 1595e0af2917SMing Lei ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); 1596e0af2917SMing Lei } 1597e0af2917SMing Lei 159803100aadSKeith Busch /* 159903100aadSKeith Busch * Check if adding a bio_vec after bprv with offset would create a gap in 160003100aadSKeith Busch * the SG list. Most drivers don't care about this, but some do. 160103100aadSKeith Busch */ 160203100aadSKeith Busch static inline bool bvec_gap_to_prev(struct request_queue *q, 160303100aadSKeith Busch struct bio_vec *bprv, unsigned int offset) 160403100aadSKeith Busch { 160503100aadSKeith Busch if (!queue_virt_boundary(q)) 160603100aadSKeith Busch return false; 1607e0af2917SMing Lei return __bvec_gap_to_prev(q, bprv, offset); 160803100aadSKeith Busch } 160903100aadSKeith Busch 1610729204efSMing Lei /* 1611729204efSMing Lei * Check if the two bvecs from two bios can be merged to one segment. 1612729204efSMing Lei * If yes, no need to check gap between the two bios since the 1st bio 1613729204efSMing Lei * and the 1st bvec in the 2nd bio can be handled in one segment.
1614729204efSMing Lei */ 1615729204efSMing Lei static inline bool bios_segs_mergeable(struct request_queue *q, 1616729204efSMing Lei struct bio *prev, struct bio_vec *prev_last_bv, 1617729204efSMing Lei struct bio_vec *next_first_bv) 1618729204efSMing Lei { 1619729204efSMing Lei if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) 1620729204efSMing Lei return false; 1621729204efSMing Lei if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) 1622729204efSMing Lei return false; 1623729204efSMing Lei if (prev->bi_seg_back_size + next_first_bv->bv_len > 1624729204efSMing Lei queue_max_segment_size(q)) 1625729204efSMing Lei return false; 1626729204efSMing Lei return true; 1627729204efSMing Lei } 1628729204efSMing Lei 16295e7c4274SJens Axboe static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, 16305e7c4274SJens Axboe struct bio *next) 16315e7c4274SJens Axboe { 163225e71a99SMing Lei if (bio_has_data(prev) && queue_virt_boundary(q)) { 163325e71a99SMing Lei struct bio_vec pb, nb; 16345e7c4274SJens Axboe 163525e71a99SMing Lei bio_get_last_bvec(prev, &pb); 163625e71a99SMing Lei bio_get_first_bvec(next, &nb); 163725e71a99SMing Lei 1638729204efSMing Lei if (!bios_segs_mergeable(q, prev, &pb, &nb)) 163925e71a99SMing Lei return __bvec_gap_to_prev(q, &pb, nb.bv_offset); 164025e71a99SMing Lei } 164125e71a99SMing Lei 164225e71a99SMing Lei return false; 16435e7c4274SJens Axboe } 16445e7c4274SJens Axboe 16455e7c4274SJens Axboe static inline bool req_gap_back_merge(struct request *req, struct bio *bio) 16465e7c4274SJens Axboe { 16475e7c4274SJens Axboe return bio_will_gap(req->q, req->biotail, bio); 16485e7c4274SJens Axboe } 16495e7c4274SJens Axboe 16505e7c4274SJens Axboe static inline bool req_gap_front_merge(struct request *req, struct bio *bio) 16515e7c4274SJens Axboe { 16525e7c4274SJens Axboe return bio_will_gap(req->q, bio, req->bio); 16535e7c4274SJens Axboe } 16545e7c4274SJens Axboe 165559c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work); 1656ee63cfa7SJens Axboe int kblockd_schedule_work_on(int cpu, struct work_struct *work); 165759c3d45eSJens Axboe int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 16588ab14595SJens Axboe int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 16591da177e4SLinus Torvalds 16609195291eSDivyesh Shah #ifdef CONFIG_BLK_CGROUP 166128f4197eSJens Axboe /* 166228f4197eSJens Axboe * This should not be using sched_clock(). A real patch is in progress 166328f4197eSJens Axboe * to fix this up; until that is in place, we need to disable preemption 166428f4197eSJens Axboe * around sched_clock() in this function and set_io_start_time_ns().
166528f4197eSJens Axboe */ 16669195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req) 16679195291eSDivyesh Shah { 166828f4197eSJens Axboe preempt_disable(); 16699195291eSDivyesh Shah req->start_time_ns = sched_clock(); 167028f4197eSJens Axboe preempt_enable(); 16719195291eSDivyesh Shah } 16729195291eSDivyesh Shah 16739195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req) 16749195291eSDivyesh Shah { 167528f4197eSJens Axboe preempt_disable(); 16769195291eSDivyesh Shah req->io_start_time_ns = sched_clock(); 167728f4197eSJens Axboe preempt_enable(); 16789195291eSDivyesh Shah } 167984c124daSDivyesh Shah 168084c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req) 168184c124daSDivyesh Shah { 168284c124daSDivyesh Shah return req->start_time_ns; 168384c124daSDivyesh Shah } 168484c124daSDivyesh Shah 168584c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req) 168684c124daSDivyesh Shah { 168784c124daSDivyesh Shah return req->io_start_time_ns; 168884c124daSDivyesh Shah } 16899195291eSDivyesh Shah #else 16909195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req) {} 16919195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req) {} 169284c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req) 169384c124daSDivyesh Shah { 169484c124daSDivyesh Shah return 0; 169584c124daSDivyesh Shah } 169684c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req) 169784c124daSDivyesh Shah { 169884c124daSDivyesh Shah return 0; 169984c124daSDivyesh Shah } 17009195291eSDivyesh Shah #endif 17019195291eSDivyesh Shah 17021da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 17031da177e4SLinus Torvalds MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 17041da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 17051da177e4SLinus Torvalds MODULE_ALIAS("block-major-" __stringify(major) "-*") 17061da177e4SLinus Torvalds 17077ba1ba12SMartin K. Petersen #if defined(CONFIG_BLK_DEV_INTEGRITY) 17087ba1ba12SMartin K. Petersen 17098288f496SMartin K. Petersen enum blk_integrity_flags { 17108288f496SMartin K. Petersen BLK_INTEGRITY_VERIFY = 1 << 0, 17118288f496SMartin K. Petersen BLK_INTEGRITY_GENERATE = 1 << 1, 17123aec2f41SMartin K. Petersen BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, 1713aae7df50SMartin K. Petersen BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, 17148288f496SMartin K. Petersen }; 17157ba1ba12SMartin K. Petersen 171618593088SMartin K. Petersen struct blk_integrity_iter { 17177ba1ba12SMartin K. Petersen void *prot_buf; 17187ba1ba12SMartin K. Petersen void *data_buf; 17193be91c4aSMartin K. Petersen sector_t seed; 17207ba1ba12SMartin K. Petersen unsigned int data_size; 17213be91c4aSMartin K. Petersen unsigned short interval; 17227ba1ba12SMartin K. Petersen const char *disk_name; 17237ba1ba12SMartin K. Petersen }; 17247ba1ba12SMartin K. Petersen 172518593088SMartin K. Petersen typedef int (integrity_processing_fn) (struct blk_integrity_iter *); 17267ba1ba12SMartin K. Petersen 17270f8087ecSMartin K. Petersen struct blk_integrity_profile { 172818593088SMartin K. Petersen integrity_processing_fn *generate_fn; 172918593088SMartin K. Petersen integrity_processing_fn *verify_fn; 17300f8087ecSMartin K. Petersen const char *name; 17310f8087ecSMartin K. Petersen }; 17327ba1ba12SMartin K. Petersen 173325520d55SMartin K. 
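/*
 * A hypothetical profile sketch (names are illustrative, not from this
 * header): a provider implements generate/verify callbacks that walk a
 * blk_integrity_iter and hangs them off the blk_integrity it registers
 * for its disk with blk_integrity_register(), declared just below:
 *
 *	static int demo_generate_fn(struct blk_integrity_iter *iter)
 *	{
 *		(produce protection data for iter->data_buf into
 *		 iter->prot_buf, iter->data_size bytes at a time)
 *		return 0;
 *	}
 *
 *	static const struct blk_integrity_profile demo_profile = {
 *		.name		= "DEMO-PROFILE",
 *		.generate_fn	= demo_generate_fn,
 *		.verify_fn	= demo_verify_fn,
 *	};
 */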
Petersen extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); 17347ba1ba12SMartin K. Petersen extern void blk_integrity_unregister(struct gendisk *); 1735ad7fce93SMartin K. Petersen extern int blk_integrity_compare(struct gendisk *, struct gendisk *); 173613f05c8dSMartin K. Petersen extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, 173713f05c8dSMartin K. Petersen struct scatterlist *); 173813f05c8dSMartin K. Petersen extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); 17394eaf99beSMartin K. Petersen extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, 174013f05c8dSMartin K. Petersen struct request *); 17414eaf99beSMartin K. Petersen extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, 174213f05c8dSMartin K. Petersen struct bio *); 17437ba1ba12SMartin K. Petersen 174425520d55SMartin K. Petersen static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 174525520d55SMartin K. Petersen { 1746ac6fc48cSDan Williams struct blk_integrity *bi = &disk->queue->integrity; 174725520d55SMartin K. Petersen 174825520d55SMartin K. Petersen if (!bi->profile) 174925520d55SMartin K. Petersen return NULL; 175025520d55SMartin K. Petersen 175125520d55SMartin K. Petersen return bi; 175225520d55SMartin K. Petersen } 175325520d55SMartin K. Petersen 1754b04accc4SJens Axboe static inline 1755b04accc4SJens Axboe struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 1756b04accc4SJens Axboe { 175725520d55SMartin K. Petersen return blk_get_integrity(bdev->bd_disk); 1758b02739b0SMartin K. Petersen } 1759b02739b0SMartin K. Petersen 1760180b2f95SMartin K. Petersen static inline bool blk_integrity_rq(struct request *rq) 17617ba1ba12SMartin K. Petersen { 1762180b2f95SMartin K. Petersen return rq->cmd_flags & REQ_INTEGRITY; 17637ba1ba12SMartin K. Petersen } 17647ba1ba12SMartin K. Petersen 176513f05c8dSMartin K. Petersen static inline void blk_queue_max_integrity_segments(struct request_queue *q, 176613f05c8dSMartin K. Petersen unsigned int segs) 176713f05c8dSMartin K. Petersen { 176813f05c8dSMartin K. Petersen q->limits.max_integrity_segments = segs; 176913f05c8dSMartin K. Petersen } 177013f05c8dSMartin K. Petersen 177113f05c8dSMartin K. Petersen static inline unsigned short 177213f05c8dSMartin K. Petersen queue_max_integrity_segments(struct request_queue *q) 177313f05c8dSMartin K. Petersen { 177413f05c8dSMartin K. Petersen return q->limits.max_integrity_segments; 177513f05c8dSMartin K. Petersen } 177613f05c8dSMartin K. 
Petersen 17777f39add3SSagi Grimberg static inline bool integrity_req_gap_back_merge(struct request *req, 17787f39add3SSagi Grimberg struct bio *next) 17797f39add3SSagi Grimberg { 17807f39add3SSagi Grimberg struct bio_integrity_payload *bip = bio_integrity(req->bio); 17817f39add3SSagi Grimberg struct bio_integrity_payload *bip_next = bio_integrity(next); 17827f39add3SSagi Grimberg 17837f39add3SSagi Grimberg return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], 17847f39add3SSagi Grimberg bip_next->bip_vec[0].bv_offset); 17857f39add3SSagi Grimberg } 17867f39add3SSagi Grimberg 17877f39add3SSagi Grimberg static inline bool integrity_req_gap_front_merge(struct request *req, 17887f39add3SSagi Grimberg struct bio *bio) 17897f39add3SSagi Grimberg { 17907f39add3SSagi Grimberg struct bio_integrity_payload *bip = bio_integrity(bio); 17917f39add3SSagi Grimberg struct bio_integrity_payload *bip_next = bio_integrity(req->bio); 17927f39add3SSagi Grimberg 17937f39add3SSagi Grimberg return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], 17947f39add3SSagi Grimberg bip_next->bip_vec[0].bv_offset); 17957f39add3SSagi Grimberg } 17967f39add3SSagi Grimberg 17977ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */ 17987ba1ba12SMartin K. Petersen 1799fd83240aSStephen Rothwell struct bio; 1800fd83240aSStephen Rothwell struct block_device; 1801fd83240aSStephen Rothwell struct gendisk; 1802fd83240aSStephen Rothwell struct blk_integrity; 1803fd83240aSStephen Rothwell 1804fd83240aSStephen Rothwell static inline int blk_integrity_rq(struct request *rq) 1805fd83240aSStephen Rothwell { 1806fd83240aSStephen Rothwell return 0; 1807fd83240aSStephen Rothwell } 1808fd83240aSStephen Rothwell static inline int blk_rq_count_integrity_sg(struct request_queue *q, 1809fd83240aSStephen Rothwell struct bio *b) 1810fd83240aSStephen Rothwell { 1811fd83240aSStephen Rothwell return 0; 1812fd83240aSStephen Rothwell } 1813fd83240aSStephen Rothwell static inline int blk_rq_map_integrity_sg(struct request_queue *q, 1814fd83240aSStephen Rothwell struct bio *b, 1815fd83240aSStephen Rothwell struct scatterlist *s) 1816fd83240aSStephen Rothwell { 1817fd83240aSStephen Rothwell return 0; 1818fd83240aSStephen Rothwell } 1819fd83240aSStephen Rothwell static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) 1820fd83240aSStephen Rothwell { 182161a04e5bSMichele Curti return NULL; 1822fd83240aSStephen Rothwell } 1823fd83240aSStephen Rothwell static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1824fd83240aSStephen Rothwell { 1825fd83240aSStephen Rothwell return NULL; 1826fd83240aSStephen Rothwell } 1827fd83240aSStephen Rothwell static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) 1828fd83240aSStephen Rothwell { 1829fd83240aSStephen Rothwell return 0; 1830fd83240aSStephen Rothwell } 183125520d55SMartin K. 
Petersen static inline void blk_integrity_register(struct gendisk *d, 1832fd83240aSStephen Rothwell struct blk_integrity *b) 1833fd83240aSStephen Rothwell { 1834fd83240aSStephen Rothwell } 1835fd83240aSStephen Rothwell static inline void blk_integrity_unregister(struct gendisk *d) 1836fd83240aSStephen Rothwell { 1837fd83240aSStephen Rothwell } 1838fd83240aSStephen Rothwell static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1839fd83240aSStephen Rothwell unsigned int segs) 1840fd83240aSStephen Rothwell { 1841fd83240aSStephen Rothwell } 1842fd83240aSStephen Rothwell static inline unsigned short queue_max_integrity_segments(struct request_queue *q) 1843fd83240aSStephen Rothwell { 1844fd83240aSStephen Rothwell return 0; 1845fd83240aSStephen Rothwell } 18464eaf99beSMartin K. Petersen static inline bool blk_integrity_merge_rq(struct request_queue *rq, 1847fd83240aSStephen Rothwell struct request *r1, 1848fd83240aSStephen Rothwell struct request *r2) 1849fd83240aSStephen Rothwell { 1850cb1a5ab6SMartin K. Petersen return true; 1851fd83240aSStephen Rothwell } 18524eaf99beSMartin K. Petersen static inline bool blk_integrity_merge_bio(struct request_queue *rq, 1853fd83240aSStephen Rothwell struct request *r, 1854fd83240aSStephen Rothwell struct bio *b) 1855fd83240aSStephen Rothwell { 1856cb1a5ab6SMartin K. Petersen return true; 1857fd83240aSStephen Rothwell } 185825520d55SMartin K. Petersen 18597f39add3SSagi Grimberg static inline bool integrity_req_gap_back_merge(struct request *req, 18607f39add3SSagi Grimberg struct bio *next) 18617f39add3SSagi Grimberg { 18627f39add3SSagi Grimberg return false; 18637f39add3SSagi Grimberg } 18647f39add3SSagi Grimberg static inline bool integrity_req_gap_front_merge(struct request *req, 18657f39add3SSagi Grimberg struct bio *bio) 18667f39add3SSagi Grimberg { 18677f39add3SSagi Grimberg return false; 18687f39add3SSagi Grimberg } 18697ba1ba12SMartin K. Petersen 18707ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */ 18717ba1ba12SMartin K. 
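/*
 * Usage sketch for the DAX control structure documented below (hedged;
 * error handling trimmed): callers fill in the input fields and
 * bdev_direct_access() returns the number of bytes usable at @addr, or
 * a negative errno:
 *
 *	struct blk_dax_ctl dax = {
 *		.sector = sector,	(page-aligned sector)
 *		.size	= PAGE_SIZE,
 *	};
 *	long avail = bdev_direct_access(bdev, &dax);
 *	if (avail < 0)
 *		return avail;
 *	(dax.addr and dax.pfn are now valid for avail bytes)
 */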
Petersen 1872b2e0d162SDan Williams /** 1873b2e0d162SDan Williams * struct blk_dax_ctl - control and output parameters for ->direct_access 1874b2e0d162SDan Williams * @sector: (input) offset relative to a block_device 1875b2e0d162SDan Williams * @addr: (output) kernel virtual address for @sector populated by driver 1876b2e0d162SDan Williams * @pfn: (output) page frame number for @addr populated by driver 1877b2e0d162SDan Williams * @size: (input) number of bytes requested 1878b2e0d162SDan Williams */ 1879b2e0d162SDan Williams struct blk_dax_ctl { 1880b2e0d162SDan Williams sector_t sector; 18817a9eb206SDan Williams void *addr; 1882b2e0d162SDan Williams long size; 188334c0fd54SDan Williams pfn_t pfn; 1884b2e0d162SDan Williams }; 1885b2e0d162SDan Williams 188608f85851SAl Viro struct block_device_operations { 1887d4430d62SAl Viro int (*open) (struct block_device *, fmode_t); 1888db2a144bSAl Viro void (*release) (struct gendisk *, fmode_t); 1889c11f0c0bSJens Axboe int (*rw_page)(struct block_device *, sector_t, struct page *, bool); 1890d4430d62SAl Viro int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1891d4430d62SAl Viro int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 18927a9eb206SDan Williams long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *, 18937a9eb206SDan Williams long); 189477ea887eSTejun Heo unsigned int (*check_events) (struct gendisk *disk, 189577ea887eSTejun Heo unsigned int clearing); 189677ea887eSTejun Heo /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 189708f85851SAl Viro int (*media_changed) (struct gendisk *); 1898c3e33e04STejun Heo void (*unlock_native_capacity) (struct gendisk *); 189908f85851SAl Viro int (*revalidate_disk) (struct gendisk *); 190008f85851SAl Viro int (*getgeo)(struct block_device *, struct hd_geometry *); 1901b3a27d05SNitin Gupta /* this callback is with swap_lock and sometimes page table lock held */ 1902b3a27d05SNitin Gupta void (*swap_slot_free_notify) (struct block_device *, unsigned long); 190308f85851SAl Viro struct module *owner; 1904bbd3e064SChristoph Hellwig const struct pr_ops *pr_ops; 190508f85851SAl Viro }; 190608f85851SAl Viro 1907633a08b8SAl Viro extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, 1908633a08b8SAl Viro unsigned long); 190947a191fdSMatthew Wilcox extern int bdev_read_page(struct block_device *, sector_t, struct page *); 191047a191fdSMatthew Wilcox extern int bdev_write_page(struct block_device *, sector_t, struct page *, 191147a191fdSMatthew Wilcox struct writeback_control *); 1912b2e0d162SDan Williams extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *); 19132d96afc8SToshi Kani extern int bdev_dax_supported(struct super_block *, int); 1914a8078b1fSToshi Kani extern bool bdev_dax_capable(struct block_device *); 19159361401eSDavid Howells #else /* CONFIG_BLOCK */ 1916ac13a829SFabian Frederick 1917ac13a829SFabian Frederick struct block_device; 1918ac13a829SFabian Frederick 19199361401eSDavid Howells /* 19209361401eSDavid Howells * stubs for when the block layer is configured out 19219361401eSDavid Howells */ 19229361401eSDavid Howells #define buffer_heads_over_limit 0 19239361401eSDavid Howells 19249361401eSDavid Howells static inline long nr_blockdev_pages(void) 19259361401eSDavid Howells { 19269361401eSDavid Howells return 0; 19279361401eSDavid Howells } 19289361401eSDavid Howells 19291f940bdfSJens Axboe struct blk_plug { 19301f940bdfSJens Axboe }; 19311f940bdfSJens Axboe 
19321f940bdfSJens Axboe static inline void blk_start_plug(struct blk_plug *plug) 193373c10101SJens Axboe { 193473c10101SJens Axboe } 193573c10101SJens Axboe 19361f940bdfSJens Axboe static inline void blk_finish_plug(struct blk_plug *plug) 193773c10101SJens Axboe { 193873c10101SJens Axboe } 193973c10101SJens Axboe 19401f940bdfSJens Axboe static inline void blk_flush_plug(struct task_struct *task) 194173c10101SJens Axboe { 194273c10101SJens Axboe } 194373c10101SJens Axboe 1944a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *task) 1945a237c1c5SJens Axboe { 1946a237c1c5SJens Axboe } 1947a237c1c5SJens Axboe 1948a237c1c5SJens Axboe 194973c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk) 195073c10101SJens Axboe { 195173c10101SJens Axboe return false; 195273c10101SJens Axboe } 195373c10101SJens Axboe 1954ac13a829SFabian Frederick static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, 1955ac13a829SFabian Frederick sector_t *error_sector) 1956ac13a829SFabian Frederick { 1957ac13a829SFabian Frederick return 0; 1958ac13a829SFabian Frederick } 1959ac13a829SFabian Frederick 19609361401eSDavid Howells #endif /* CONFIG_BLOCK */ 19619361401eSDavid Howells 19621da177e4SLinus Torvalds #endif