#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
};

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ)

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list.  The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue.  So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler.  So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};
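
/*
 * Example (illustrative sketch): a driver timeout handler matching the
 * rq_timed_out_fn typedef above.  mydrv_device_busy() is hypothetical;
 * a real driver would query its own state.  Returning BLK_EH_RESET_TIMER
 * restarts the timer, while BLK_EH_NOT_HANDLED hands the timeout back to
 * the block layer:
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_device_busy(rq->q->queuedata))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 */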

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};
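
/*
 * Example (illustrative): drivers normally do not fill in a queue_limits
 * structure by hand; they advertise their limits through the blk_queue_*()
 * helpers declared later in this header.  The values below are made up:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 2048);
 *	blk_queue_max_segments(q, 128);
 */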

#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int	nr_zones;
	u8		padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait.  Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}
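
/*
 * Example (illustrative): a driver typically uses the unlocked variants
 * while setting up a queue that is not yet visible to anyone else, e.g.
 * marking an SSD non-rotational and removing it from the entropy pool:
 *
 *	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 *	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 *
 * queue_flag_set()/queue_flag_clear() must instead be called with the
 * queue_lock held (see queue_lockdep_assert_held()).
 */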

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->rq_flags & RQF_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}
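
/*
 * Example (illustrative): submitters can check queue capabilities with the
 * test helpers above before issuing an operation the device may not
 * support:
 *
 *	struct request_queue *q = bdev_get_queue(bdev);
 *
 *	if (blk_queue_discard(q))
 *		... it is safe to issue REQ_OP_DISCARD bios ...
 *
 * bdev_get_queue() is defined further down in this header.
 */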

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
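
/*
 * Example (illustrative): copying the data of a request out of its bio
 * segments.  "buf" is a hypothetical destination assumed large enough,
 * and the pages are assumed to be directly addressable (not highmem;
 * otherwise they must be mapped first):
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		memcpy(buf, page_address(bvec.bv_page) + bvec.bv_offset,
 *		       bvec.bv_len);
 *		buf += bvec.bv_len;
 *	}
 */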

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset.  Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}
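
/*
 * Worked example (illustrative) for blk_max_size_offset() above: with
 * q->limits.chunk_sectors == 128 and offset == 100, the largest request
 * starting there is 128 - (100 & 127) = 28 sectors, which keeps the
 * request from crossing the chunk boundary at sector 128.
 */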

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);
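
/*
 * Example (illustrative sketch): a minimal request_fn-style driver loop
 * built from the issue and completion helpers above.  request_fn is
 * entered with the queue_lock held, so the locked __blk_end_request_all()
 * form is used; mydrv_transfer() is a hypothetical function that performs
 * the actual I/O:
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (rq->cmd_type != REQ_TYPE_FS) {
 *				__blk_end_request_all(rq, -EIO);
 *				continue;
 *			}
 *			mydrv_transfer(rq);
 *			__blk_end_request_all(rq, 0);
 *		}
 *	}
 */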
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                        spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                        request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                             sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                              sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
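
/*
 * Illustrative sketch (hypothetical driver; the values are made-up
 * examples): queue limits are normally configured once at
 * initialization, after the queue is set up and before the disk is
 * added.
 *
 *      q = blk_init_queue(my_request_fn, &my_lock);
 *      if (!q)
 *              return -ENOMEM;
 *      blk_queue_logical_block_size(q, 512);
 *      blk_queue_physical_block_size(q, 4096);
 *      blk_queue_max_hw_sectors(q, 1024);
 *      blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 */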
/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
                                       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
        return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
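
/*
 * Illustrative sketch (hypothetical driver): the expected calling
 * sequence around a runtime suspend, assuming the queue was registered
 * with blk_pm_runtime_init(q, dev) at probe time. my_hw_suspend() is a
 * made-up helper.
 *
 *      static int my_runtime_suspend(struct device *dev)
 *      {
 *              struct my_dev *mydev = dev_get_drvdata(dev);
 *              int err;
 *
 *              err = blk_pre_runtime_suspend(mydev->queue);
 *              if (err)
 *                      return err;
 *              err = my_hw_suspend(mydev);
 *              blk_post_runtime_suspend(mydev->queue, err);
 *              return err;
 *      }
 */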
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
        struct list_head list;          /* requests */
        struct list_head mq_list;       /* blk-mq requests */
        struct list_head cb_list;       /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
        struct list_head list;
        blk_plug_cb_fn callback;
        void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
                                             void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        return plug &&
                (!list_empty(&plug->list) ||
                 !list_empty(&plug->mq_list) ||
                 !list_empty(&plug->cb_list));
}
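
/*
 * Typical usage (illustrative sketch): a submitter batches several
 * bios between blk_start_plug() and blk_finish_plug(); the list is
 * flushed to the device at blk_finish_plug(), or earlier if the task
 * sleeps. my_has_more_io() and my_next_bio() are made-up helpers.
 *
 *      struct blk_plug plug;
 *
 *      blk_start_plug(&plug);
 *      while (my_has_more_io())
 *              submit_bio(my_next_bio());
 *      blk_finish_plug(&plug);
 */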
/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                                                     int tag)
{
        if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
                return NULL;
        return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE   (1 << 0)        /* issue a secure erase */
#define BLKDEV_DISCARD_ZERO     (1 << 1)        /* must reliably zero data */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, true);
}
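
/*
 * Worked example for the sb_issue_* conversions above (illustrative):
 * with a 4096-byte filesystem block size, s_blocksize_bits = 12, so the
 * shift is 12 - 9 = 3. Filesystem block 100 therefore maps to sector
 * 100 << 3 = 800, and 16 blocks map to 16 << 3 = 128 sectors.
 */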
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
        BLK_DEF_MAX_SECTORS     = 2560,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
        return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
        return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
        return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
        int retval = 512;

        if (q && q->limits.logical_block_size)
                retval = q->limits.logical_block_size;

        return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
        return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
        return q->limits.io_opt;
}
static inline int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
        if (q->limits.misaligned)
                return -1;

        return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.misaligned)
                return -1;

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->alignment_offset;

        return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
        if (q->limits.discard_misaligned)
                return -1;

        return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> 9;
        granularity = lim->discard_granularity >> 9;
        if (!granularity)
                return 0;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << 9;
}
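
/*
 * Worked example for queue_limit_alignment_offset() (illustrative):
 * with physical_block_size = io_min = 4096, the granularity is 4096
 * bytes, i.e. 8 sectors. For sector 11 and alignment_offset = 0,
 * alignment = (11 % 8) * 512 = 1536 bytes, and the helper returns
 * (4096 + 0 - 1536) % 4096 = 2560 bytes: the distance from sector 11
 * to the next naturally aligned boundary at sector 16.
 * queue_limit_discard_alignment() above performs the same computation
 * in sector units for the discard granularity.
 */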
static inline int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->discard_alignment;

        return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
        if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
                return 1;

        return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
        return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return q->limits.max_write_same_sectors;

        return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return blk_queue_zoned_model(q);

        return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return blk_queue_is_zoned(q);

        return false;
}

static inline unsigned int bdev_zone_size(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return blk_queue_zone_size(q);

        return 0;
}
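
/*
 * Illustrative sketch (hypothetical caller): a filesystem probing the
 * zoning properties of its backing device before choosing an
 * allocation strategy. Host-managed (BLK_ZONED_HM) devices require
 * sequential writes within each zone.
 *
 *      if (bdev_is_zoned(bdev) && bdev_zoned_model(bdev) == BLK_ZONED_HM)
 *              sbi->zone_sectors = bdev_zone_size(bdev);
 */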
static inline int queue_dma_alignment(struct request_queue *q)
{
        return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
{
        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        return !(addr & alignment) && !(len & alignment);
}
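
/*
 * Worked example for blk_rq_aligned() (illustrative): with the default
 * queue_dma_alignment() of 511 and a zero dma_pad_mask, the combined
 * mask is 511, so both the buffer address and the length must be
 * multiples of 512. addr = 0x1200 (4608 = 9 * 512) with len = 1024
 * passes; addr = 0x1201 would fail the (addr & alignment) test.
 */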
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
        unsigned int bits = 8;
        do {
                bits++;
                size >>= 1;
        } while (size > 256);
        return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
        return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
        return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
        put_page(p.v);
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
                                struct bio_vec *bprv, unsigned int offset)
{
        return offset ||
                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
                                struct bio_vec *bprv, unsigned int offset)
{
        if (!queue_virt_boundary(q))
                return false;
        return __bvec_gap_to_prev(q, bprv, offset);
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
                                struct bio *next)
{
        if (bio_has_data(prev) && queue_virt_boundary(q)) {
                struct bio_vec pb, nb;

                bio_get_last_bvec(prev, &pb);
                bio_get_first_bvec(next, &nb);

                return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
        }

        return false;
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, bio, req->bio);
}
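
/*
 * Worked example for the gap checks above (illustrative): with
 * virt_boundary_mask = 4095 (a 4K boundary), a previous bio_vec with
 * bv_offset = 0 and bv_len = 2048 ends mid-boundary, so
 * (0 + 2048) & 4095 != 0 and any appended vector would create an SG
 * gap. If instead bv_len were 4096 and the next vector started at
 * offset 0, both tests evaluate to zero and the segments may merge.
 */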
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up; until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
        preempt_disable();
        req->start_time_ns = sched_clock();
        preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
        preempt_disable();
        req->io_start_time_ns = sched_clock();
        preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
        return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
        return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
        MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
        BLK_INTEGRITY_VERIFY            = 1 << 0,
        BLK_INTEGRITY_GENERATE          = 1 << 1,
        BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
        BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
};

struct blk_integrity_iter {
        void                    *prot_buf;
        void                    *data_buf;
        sector_t                seed;
        unsigned int            data_size;
        unsigned short          interval;
        const char              *disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
        integrity_processing_fn         *generate_fn;
        integrity_processing_fn         *verify_fn;
        const char                      *name;
};
extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
                                   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
                                   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                                    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        struct blk_integrity *bi = &disk->queue->integrity;

        if (!bi->profile)
                return NULL;

        return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
        return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
        return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
        q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
        return q->limits.max_integrity_segments;
}
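
/*
 * Illustrative sketch (hypothetical driver; the profile symbol and
 * field values are assumptions, not requirements of this API): a
 * driver advertising T10 Type 1 protection might register a profile
 * like this before adding the disk.
 *
 *      struct blk_integrity bi = {
 *              .profile        = &t10_pi_type1_crc,
 *              .tuple_size     = 8,
 *              .interval_exp   = 9,
 *              .flags          = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
 *      };
 *
 *      blk_integrity_register(disk, &bi);
 */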
static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
{
        struct bio_integrity_payload *bip = bio_integrity(req->bio);
        struct bio_integrity_payload *bip_next = bio_integrity(next);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
                                                 struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
        return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
                                            struct bio *b)
{
        return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
                                          struct bio *b,
                                          struct scatterlist *s)
{
        return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
        return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
        return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
                                          struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
        return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                                          struct request *r1,
                                          struct request *r2)
{
        return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                                           struct request *r,
                                           struct bio *b)
{
        return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
{
        return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
                                                 struct bio *bio)
{
        return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
        sector_t sector;
        void *addr;
        long size;
        pfn_t pfn;
};

struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        void (*release) (struct gendisk *, fmode_t);
        int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
                        long);
        unsigned int (*check_events) (struct gendisk *disk,
                                      unsigned int clearing);
        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
        const struct pr_ops *pr_ops;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
                           struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
extern bool bdev_dax_capable(struct block_device *);
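
/*
 * Illustrative sketch (error handling abbreviated; 'bdev', 'sector' and
 * 'buf' are assumed from the caller's context): a caller fills a
 * blk_dax_ctl and receives the kernel address and pfn for the range,
 * provided the driver implements ->direct_access. The return value is
 * the number of bytes available at dax.addr, or a negative errno.
 *
 *      struct blk_dax_ctl dax = {
 *              .sector = sector,
 *              .size   = PAGE_SIZE,
 *      };
 *      long avail = bdev_direct_access(bdev, &dax);
 *
 *      if (avail > 0)
 *              memcpy(buf, dax.addr, avail);
 */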
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
        return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                                     sector_t *error_sector)
{
        return 0;
}

#endif /* CONFIG_BLOCK */

#endif