#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ)

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list.  The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue.  So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler.  So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
	struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};
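/*
 * Example (editor's illustration, not part of the original header):
 * io_min and io_opt advertise efficient I/O sizes to upper layers.
 * For a hypothetical 4-drive RAID5 set with a 64KiB chunk, a stacking
 * driver would report io_min = 65536 (one chunk, the smallest write
 * that avoids a read-modify-write) and io_opt = 196608 (one full
 * stripe of three data chunks), while logical_block_size stays that of
 * the member disks.  A minimal sanity check over the fields above,
 * assuming the limits were filled in via the blk_queue_* setters
 * (which bump io_min to at least the physical block size):
 */
static inline bool example_limits_are_consistent(const struct queue_limits *lim)
{
	/* physical block size is never smaller than the logical one */
	return lim->physical_block_size >= lim->logical_block_size &&
	       lim->io_min >= lim->physical_block_size;
}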
#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int	nr_zones;
	u8		padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy.  ->__queue_lock should
	 * _never_ be used directly, it is queue private.  always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */
#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	struct blk_rq_stat	rq_stats[2];

	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait.  Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;
	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device	bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_STATS	27	/* track rq completion times */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}
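/*
 * Example (editor's sketch, not part of the original header): the
 * plain queue_flag_set()/queue_flag_clear() helpers assert via lockdep
 * that ->queue_lock is held, so a typical caller takes the lock first.
 * The *_unlocked variants are for contexts where the queue is not yet
 * (or no longer) visible to concurrent users.
 */
static inline void example_mark_queue_nonrot(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);
}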
static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->rq_flags & RQF_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
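/*
 * Example (editor's sketch, not part of the original header; assumes
 * CONFIG_BLK_DEV_ZONED): callers are expected to check the zoned model
 * with the helpers above before issuing zone operations such as
 * blkdev_report_zones(), declared earlier in this file.
 */
static inline int example_report_one_zone(struct request_queue *q,
					  struct block_device *bdev,
					  struct blk_zone *zone)
{
	unsigned int nr_zones = 1;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;	/* regular, non-zoned device */

	/* report a single zone starting at sector 0 */
	return blkdev_report_zones(bdev, 0, zone, &nr_zones, GFP_KERNEL);
}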
static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};
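/*
 * Example (editor's sketch, not part of the original header): a
 * minimal ->prep_rq_fn showing how the BLKPREP_* values above are
 * used.  Hypothetical driver code.
 */
static inline int example_prep_rq(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return BLKPREP_KILL;	/* fail the request with -EIO */

	rq->rq_flags |= RQF_DONTPREP;	/* don't call prep again on requeue */
	return BLKPREP_OK;		/* ready to be dispatched */
}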
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
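/*
 * Example (editor's sketch, not part of the original header): summing
 * the data bytes of a request with the iteration macros above.
 */
static inline unsigned int example_rq_payload_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}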
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern void blk_mq_quiesce_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset.  Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}
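/*
 * Example (editor's note, not part of the original header): with
 * chunk_sectors = 256 and offset = 300, blk_max_size_offset() returns
 * 256 - (300 & 255) = 212, i.e. an I/O starting at sector 300 may span
 * at most 212 sectors so that it does not cross the chunk boundary at
 * sector 512.  The accessors above combine in the obvious way:
 */
static inline sector_t example_rq_end_sector(const struct request *rq)
{
	/* first sector past the end of the request's data */
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}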
static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);
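/*
 * Example (editor's sketch, not part of the original header): a driver
 * completing I/O with the functions above.  blk_end_request() takes
 * the queue lock itself; a caller that already holds q->queue_lock
 * must use the __blk_end_request*() variants instead.
 */
static inline void example_complete_bytes(struct request *rq, int error,
					  unsigned int bytes)
{
	if (blk_end_request(rq, error, bytes))
		return;		/* partial completion, request still active */
	/* otherwise the request is fully completed and has been freed */
}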
10871da177e4SLinus Torvalds /*
10881da177e4SLinus Torvalds  * Access functions for manipulating queue properties
10891da177e4SLinus Torvalds  */
1090165125e1SJens Axboe extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
10911946089aSChristoph Lameter 					spinlock_t *lock, int node_id);
1092165125e1SJens Axboe extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
109301effb0dSMike Snitzer extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
109401effb0dSMike Snitzer 					request_fn_proc *, spinlock_t *);
1095165125e1SJens Axboe extern void blk_cleanup_queue(struct request_queue *);
1096165125e1SJens Axboe extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1097165125e1SJens Axboe extern void blk_queue_bounce_limit(struct request_queue *, u64);
1098086fa5ffSMartin K. Petersen extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1099762380adSJens Axboe extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
11008a78362cSMartin K. Petersen extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1101165125e1SJens Axboe extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
110267efc925SChristoph Hellwig extern void blk_queue_max_discard_sectors(struct request_queue *q,
110367efc925SChristoph Hellwig 		unsigned int max_discard_sectors);
11044363ac7cSMartin K. Petersen extern void blk_queue_max_write_same_sectors(struct request_queue *q,
11054363ac7cSMartin K. Petersen 		unsigned int max_write_same_sectors);
1106e1defc4fSMartin K. Petersen extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1107892b6f90SMartin K. Petersen extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1108c72758f3SMartin K. Petersen extern void blk_queue_alignment_offset(struct request_queue *q,
1109c72758f3SMartin K. Petersen 				       unsigned int alignment);
11107c958e32SMartin K. Petersen extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1111c72758f3SMartin K. Petersen extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
11123c5820c7SMartin K. Petersen extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1113c72758f3SMartin K. Petersen extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1114d278d4a8SJens Axboe extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1115e475bba2SMartin K. Petersen extern void blk_set_default_limits(struct queue_limits *lim);
1116b1bd055dSMartin K. Petersen extern void blk_set_stacking_limits(struct queue_limits *lim);
1117c72758f3SMartin K. Petersen extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1118c72758f3SMartin K. Petersen 			    sector_t offset);
111917be8c24SMartin K. Petersen extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
112017be8c24SMartin K. Petersen 			     sector_t offset);
1121c72758f3SMartin K. Petersen extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1122c72758f3SMartin K. Petersen 			      sector_t offset);
1123165125e1SJens Axboe extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1124e3790c7dSTejun Heo extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
112527f8221aSFUJITA Tomonori extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
11262fb98e84STejun Heo extern int blk_queue_dma_drain(struct request_queue *q,
11272fb98e84STejun Heo 			       dma_drain_needed_fn *dma_drain_needed,
11282fb98e84STejun Heo 			       void *buf, unsigned int size);
1129ef9e3facSKiyoshi Ueda extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1130165125e1SJens Axboe extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
113103100aadSKeith Busch extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1132165125e1SJens Axboe extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
113328018c24SJames Bottomley extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1134165125e1SJens Axboe extern void blk_queue_dma_alignment(struct request_queue *, int);
113511c3e689SJames Bottomley extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1136165125e1SJens Axboe extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1137242f9dcbSJens Axboe extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1138242f9dcbSJens Axboe extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1139f3876930S[email protected] extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
114093e9d8e8SJens Axboe extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
11411da177e4SLinus Torvalds extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
11421da177e4SLinus Torvalds 
1143165125e1SJens Axboe extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
11441da177e4SLinus Torvalds extern void blk_dump_rq_flags(struct request *, char *);
11451da177e4SLinus Torvalds extern long nr_blockdev_pages(void);
11461da177e4SLinus Torvalds 
114709ac46c4STejun Heo bool __must_check blk_get_queue(struct request_queue *);
1148165125e1SJens Axboe struct request_queue *blk_alloc_queue(gfp_t);
1149165125e1SJens Axboe struct request_queue *blk_alloc_queue_node(gfp_t, int);
1150165125e1SJens Axboe extern void blk_put_queue(struct request_queue *);
11513f21c265SJens Axboe extern void blk_set_queue_dying(struct request_queue *);
11521da177e4SLinus Torvalds 
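/*
 * Editor's sketch: typical probe-time use of the setup functions above for
 * a hypothetical device with 4k sectors; the limits are illustrative only,
 * and my_request_fn is the sketch shown earlier.
 */
static struct request_queue *my_alloc_queue(spinlock_t *lock)
{
	struct request_queue *q = blk_init_queue(my_request_fn, lock);

	if (!q)
		return NULL;
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_io_min(q, 4096);		/* preferred minimum I/O size */
	blk_queue_max_hw_sectors(q, 8192);	/* 4 MB hardware limit */
	blk_queue_max_segments(q, 128);
	return q;
}
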
1153316cc67dSShaohua Li /*
11546c954667SLin Ming  * block layer runtime pm functions
11556c954667SLin Ming  */
115647fafbc7SRafael J. Wysocki #ifdef CONFIG_PM
11576c954667SLin Ming extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
11586c954667SLin Ming extern int blk_pre_runtime_suspend(struct request_queue *q);
11596c954667SLin Ming extern void blk_post_runtime_suspend(struct request_queue *q, int err);
11606c954667SLin Ming extern void blk_pre_runtime_resume(struct request_queue *q);
11616c954667SLin Ming extern void blk_post_runtime_resume(struct request_queue *q, int err);
1162d07ab6d1SMika Westerberg extern void blk_set_runtime_active(struct request_queue *q);
11636c954667SLin Ming #else
11646c954667SLin Ming static inline void blk_pm_runtime_init(struct request_queue *q,
11656c954667SLin Ming 				       struct device *dev) {}
11666c954667SLin Ming static inline int blk_pre_runtime_suspend(struct request_queue *q)
11676c954667SLin Ming {
11686c954667SLin Ming 	return -ENOSYS;
11696c954667SLin Ming }
11706c954667SLin Ming static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
11716c954667SLin Ming static inline void blk_pre_runtime_resume(struct request_queue *q) {}
11726c954667SLin Ming static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1173d07ab6d1SMika Westerberg static inline void blk_set_runtime_active(struct request_queue *q) {}
11746c954667SLin Ming #endif
11756c954667SLin Ming 
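/*
 * Editor's sketch: how a driver is expected to bracket its runtime suspend
 * path with the helpers above, after calling blk_pm_runtime_init() at probe
 * time; struct my_dev and my_dev_quiesce() are hypothetical.
 */
static int my_runtime_suspend(struct device *dev)
{
	struct my_dev *mydev = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(mydev->queue);
	if (err)
		return err;	/* -EBUSY: requests in flight, stay active */
	err = my_dev_quiesce(mydev);
	blk_post_runtime_suspend(mydev->queue, err);
	return err;
}
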
11766c954667SLin Ming /*
117775df7136SSuresh Jayaraman  * blk_plug permits building a queue of related requests by holding the I/O
117875df7136SSuresh Jayaraman  * fragments for a short period. This allows merging of sequential requests
117975df7136SSuresh Jayaraman  * into a single larger request. As the requests are moved from a per-task list
118075df7136SSuresh Jayaraman  * to the device's request_queue in a batch, this results in improved scalability
118175df7136SSuresh Jayaraman  * because contention on the request_queue lock is reduced.
118275df7136SSuresh Jayaraman  *
118375df7136SSuresh Jayaraman  * It is ok not to disable preemption when adding the request to the plug list
118475df7136SSuresh Jayaraman  * or when attempting a merge, because blk_schedule_flush_plug() will only flush
118575df7136SSuresh Jayaraman  * the plug list when the task sleeps by itself. For details, please see
118675df7136SSuresh Jayaraman  * schedule() where blk_schedule_flush_plug() is called.
1187316cc67dSShaohua Li  */
118873c10101SJens Axboe struct blk_plug {
118975df7136SSuresh Jayaraman 	struct list_head list; /* requests */
1190320ae51fSJens Axboe 	struct list_head mq_list; /* blk-mq requests */
119175df7136SSuresh Jayaraman 	struct list_head cb_list; /* md requires an unplug callback */
119273c10101SJens Axboe };
119355c022bbSShaohua Li #define BLK_MAX_REQUEST_COUNT 16
119450d24c34SShaohua Li #define BLK_PLUG_FLUSH_SIZE (128 * 1024)
119555c022bbSShaohua Li 
11969cbb1750SNeilBrown struct blk_plug_cb;
119774018dc3SNeilBrown typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1198048c9374SNeilBrown struct blk_plug_cb {
1199048c9374SNeilBrown 	struct list_head list;
12009cbb1750SNeilBrown 	blk_plug_cb_fn callback;
12019cbb1750SNeilBrown 	void *data;
1202048c9374SNeilBrown };
12039cbb1750SNeilBrown extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
12049cbb1750SNeilBrown 					     void *data, int size);
120573c10101SJens Axboe extern void blk_start_plug(struct blk_plug *);
120673c10101SJens Axboe extern void blk_finish_plug(struct blk_plug *);
1207f6603783SJens Axboe extern void blk_flush_plug_list(struct blk_plug *, bool);
120873c10101SJens Axboe 
120973c10101SJens Axboe static inline void blk_flush_plug(struct task_struct *tsk)
121073c10101SJens Axboe {
121173c10101SJens Axboe 	struct blk_plug *plug = tsk->plug;
121273c10101SJens Axboe 
121388b996cdSChristoph Hellwig 	if (plug)
1214a237c1c5SJens Axboe 		blk_flush_plug_list(plug, false);
1215a237c1c5SJens Axboe }
1216a237c1c5SJens Axboe 
1217a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1218a237c1c5SJens Axboe {
1219a237c1c5SJens Axboe 	struct blk_plug *plug = tsk->plug;
1220a237c1c5SJens Axboe 
1221a237c1c5SJens Axboe 	if (plug)
1222f6603783SJens Axboe 		blk_flush_plug_list(plug, true);
122373c10101SJens Axboe }
122473c10101SJens Axboe 
122573c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk)
122673c10101SJens Axboe {
122773c10101SJens Axboe 	struct blk_plug *plug = tsk->plug;
122873c10101SJens Axboe 
1229320ae51fSJens Axboe 	return plug &&
1230320ae51fSJens Axboe 		(!list_empty(&plug->list) ||
1231320ae51fSJens Axboe 		 !list_empty(&plug->mq_list) ||
1232320ae51fSJens Axboe 		 !list_empty(&plug->cb_list));
123373c10101SJens Axboe }
123473c10101SJens Axboe 
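/*
 * Editor's sketch: batching several submissions under one plug so the block
 * layer can merge adjacent requests before they reach the device; the bios
 * array is assumed to have been set up by the caller.
 */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* gathers on the per-task plug list */
	blk_finish_plug(&plug);		/* flushes the list to the queue(s) */
}
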
12351da177e4SLinus Torvalds /*
12361da177e4SLinus Torvalds  * tag stuff
12371da177e4SLinus Torvalds  */
1238165125e1SJens Axboe extern int blk_queue_start_tag(struct request_queue *, struct request *);
1239165125e1SJens Axboe extern struct request *blk_queue_find_tag(struct request_queue *, int);
1240165125e1SJens Axboe extern void blk_queue_end_tag(struct request_queue *, struct request *);
1241ee1b6f7aSShaohua Li extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1242165125e1SJens Axboe extern void blk_queue_free_tags(struct request_queue *);
1243165125e1SJens Axboe extern int blk_queue_resize_tags(struct request_queue *, int);
1244165125e1SJens Axboe extern void blk_queue_invalidate_tags(struct request_queue *);
1245ee1b6f7aSShaohua Li extern struct blk_queue_tag *blk_init_tags(int, int);
1246492dfb48SJames Bottomley extern void blk_free_tags(struct blk_queue_tag *);
12471da177e4SLinus Torvalds 
1248f583f492SDavid C Somayajulu static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1249f583f492SDavid C Somayajulu 						int tag)
1250f583f492SDavid C Somayajulu {
1251f583f492SDavid C Somayajulu 	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1252f583f492SDavid C Somayajulu 		return NULL;
1253f583f492SDavid C Somayajulu 	return bqt->tag_index[tag];
1254f583f492SDavid C Somayajulu }
1255dd3932edSChristoph Hellwig 
1256e950fdf7SChristoph Hellwig 
1257e950fdf7SChristoph Hellwig #define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
1258e950fdf7SChristoph Hellwig #define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */
1259dd3932edSChristoph Hellwig 
1260dd3932edSChristoph Hellwig extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
1261fbd9b09aSDmitry Monakhov extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1262fbd9b09aSDmitry Monakhov 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
126338f25255SChristoph Hellwig extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1264288dab8aSChristoph Hellwig 		sector_t nr_sects, gfp_t gfp_mask, int flags,
1265469e3216SMike Christie 		struct bio **biop);
12664363ac7cSMartin K. Petersen extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
12674363ac7cSMartin K. Petersen 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
12683f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1269d93ba7a5SMartin K. Petersen 		sector_t nr_sects, gfp_t gfp_mask, bool discard);
12702cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block,
12712cf6d26aSChristoph Hellwig 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1272fb2dce86SDavid Woodhouse {
12732cf6d26aSChristoph Hellwig 	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
12742cf6d26aSChristoph Hellwig 				    nr_blocks << (sb->s_blocksize_bits - 9),
12752cf6d26aSChristoph Hellwig 				    gfp_mask, flags);
1276fb2dce86SDavid Woodhouse }
1277e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1278a107e5a3STheodore Ts'o 		sector_t nr_blocks, gfp_t gfp_mask)
1279e6fa0be6SLukas Czerner {
1280e6fa0be6SLukas Czerner 	return blkdev_issue_zeroout(sb->s_bdev,
1281e6fa0be6SLukas Czerner 				    block << (sb->s_blocksize_bits - 9),
1282e6fa0be6SLukas Czerner 				    nr_blocks << (sb->s_blocksize_bits - 9),
1283d93ba7a5SMartin K. Petersen 				    gfp_mask, true);
1284e6fa0be6SLukas Czerner }
12851da177e4SLinus Torvalds 
1286018e0446SJens Axboe extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
12870b07de85SAdel Gadllah 
1288eb28d31bSMartin K. Petersen enum blk_default_limits {
1289eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENTS	= 128,
1290eb28d31bSMartin K. Petersen 	BLK_SAFE_MAX_SECTORS	= 255,
1291d2be537cSJeff Moyer 	BLK_DEF_MAX_SECTORS	= 2560,
1292eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENT_SIZE	= 65536,
1293eb28d31bSMartin K. Petersen 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1294eb28d31bSMartin K. Petersen };
12950e435ac2SMilan Broz 
12961da177e4SLinus Torvalds #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
12971da177e4SLinus Torvalds 
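/*
 * Editor's sketch: discarding a sector range with the helpers above.
 * BLKDEV_DISCARD_SECURE demands a secure erase and fails on devices that
 * cannot provide one; flags == 0 is an ordinary discard.
 */
static int my_trim(struct block_device *bdev, sector_t sector,
		   sector_t nr_sects, bool secure)
{
	unsigned long flags = secure ? BLKDEV_DISCARD_SECURE : 0;

	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, flags);
}
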
1298ae03bf63SMartin K. Petersen static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1299ae03bf63SMartin K. Petersen {
1300025146e1SMartin K. Petersen 	return q->limits.bounce_pfn;
1301ae03bf63SMartin K. Petersen }
1302ae03bf63SMartin K. Petersen 
1303ae03bf63SMartin K. Petersen static inline unsigned long queue_segment_boundary(struct request_queue *q)
1304ae03bf63SMartin K. Petersen {
1305025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
1306ae03bf63SMartin K. Petersen }
1307ae03bf63SMartin K. Petersen 
130803100aadSKeith Busch static inline unsigned long queue_virt_boundary(struct request_queue *q)
130903100aadSKeith Busch {
131003100aadSKeith Busch 	return q->limits.virt_boundary_mask;
131103100aadSKeith Busch }
131203100aadSKeith Busch 
1313ae03bf63SMartin K. Petersen static inline unsigned int queue_max_sectors(struct request_queue *q)
1314ae03bf63SMartin K. Petersen {
1315025146e1SMartin K. Petersen 	return q->limits.max_sectors;
1316ae03bf63SMartin K. Petersen }
1317ae03bf63SMartin K. Petersen 
1318ae03bf63SMartin K. Petersen static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1319ae03bf63SMartin K. Petersen {
1320025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
1321ae03bf63SMartin K. Petersen }
1322ae03bf63SMartin K. Petersen 
13238a78362cSMartin K. Petersen static inline unsigned short queue_max_segments(struct request_queue *q)
1324ae03bf63SMartin K. Petersen {
13258a78362cSMartin K. Petersen 	return q->limits.max_segments;
1326ae03bf63SMartin K. Petersen }
1327ae03bf63SMartin K. Petersen 
1328ae03bf63SMartin K. Petersen static inline unsigned int queue_max_segment_size(struct request_queue *q)
1329ae03bf63SMartin K. Petersen {
1330025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
1331ae03bf63SMartin K. Petersen }
1332ae03bf63SMartin K. Petersen 
1333e1defc4fSMartin K. Petersen static inline unsigned short queue_logical_block_size(struct request_queue *q)
13341da177e4SLinus Torvalds {
13351da177e4SLinus Torvalds 	int retval = 512;
13361da177e4SLinus Torvalds 
1337025146e1SMartin K. Petersen 	if (q && q->limits.logical_block_size)
1338025146e1SMartin K. Petersen 		retval = q->limits.logical_block_size;
13391da177e4SLinus Torvalds 
13401da177e4SLinus Torvalds 	return retval;
13411da177e4SLinus Torvalds }
13421da177e4SLinus Torvalds 
1343e1defc4fSMartin K. Petersen static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
13441da177e4SLinus Torvalds {
1345e1defc4fSMartin K. Petersen 	return queue_logical_block_size(bdev_get_queue(bdev));
13461da177e4SLinus Torvalds }
13471da177e4SLinus Torvalds 
1348c72758f3SMartin K. Petersen static inline unsigned int queue_physical_block_size(struct request_queue *q)
1349c72758f3SMartin K. Petersen {
1350c72758f3SMartin K. Petersen 	return q->limits.physical_block_size;
1351c72758f3SMartin K. Petersen }
1352c72758f3SMartin K. Petersen 
1353892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1354ac481c20SMartin K. Petersen {
1355ac481c20SMartin K. Petersen 	return queue_physical_block_size(bdev_get_queue(bdev));
1356ac481c20SMartin K. Petersen }
1357ac481c20SMartin K. Petersen 
1358c72758f3SMartin K. Petersen static inline unsigned int queue_io_min(struct request_queue *q)
1359c72758f3SMartin K. Petersen {
1360c72758f3SMartin K. Petersen 	return q->limits.io_min;
1361c72758f3SMartin K. Petersen }
1362c72758f3SMartin K. Petersen 
1363ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev)
1364ac481c20SMartin K. Petersen {
1365ac481c20SMartin K. Petersen 	return queue_io_min(bdev_get_queue(bdev));
1366ac481c20SMartin K. Petersen }
1367ac481c20SMartin K. Petersen 
1368c72758f3SMartin K. Petersen static inline unsigned int queue_io_opt(struct request_queue *q)
1369c72758f3SMartin K. Petersen {
1370c72758f3SMartin K. Petersen 	return q->limits.io_opt;
1371c72758f3SMartin K. Petersen }
1372c72758f3SMartin K. Petersen 
1373ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev)
1374ac481c20SMartin K. Petersen {
1375ac481c20SMartin K. Petersen 	return queue_io_opt(bdev_get_queue(bdev));
1376ac481c20SMartin K. Petersen }
1377ac481c20SMartin K. Petersen 
1378c72758f3SMartin K. Petersen static inline int queue_alignment_offset(struct request_queue *q)
1379c72758f3SMartin K. Petersen {
1380ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1381c72758f3SMartin K. Petersen 		return -1;
1382c72758f3SMartin K. Petersen 
1383c72758f3SMartin K. Petersen 	return q->limits.alignment_offset;
1384c72758f3SMartin K. Petersen }
1385c72758f3SMartin K. Petersen 
1386e03a72e1SMartin K. Petersen static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
138781744ee4SMartin K. Petersen {
138881744ee4SMartin K. Petersen 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1389b8839b8cSMike Snitzer 	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
139081744ee4SMartin K. Petersen 
1391b8839b8cSMike Snitzer 	return (granularity + lim->alignment_offset - alignment) % granularity;
1392c72758f3SMartin K. Petersen }
1393c72758f3SMartin K. Petersen 
1394ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev)
1395ac481c20SMartin K. Petersen {
1396ac481c20SMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
1397ac481c20SMartin K. Petersen 
1398ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1399ac481c20SMartin K. Petersen 		return -1;
1400ac481c20SMartin K. Petersen 
1401ac481c20SMartin K. Petersen 	if (bdev != bdev->bd_contains)
1402ac481c20SMartin K. Petersen 		return bdev->bd_part->alignment_offset;
1403ac481c20SMartin K. Petersen 
1404ac481c20SMartin K. Petersen 	return q->limits.alignment_offset;
1405ac481c20SMartin K. Petersen }
1406ac481c20SMartin K. Petersen 
140786b37281SMartin K. Petersen static inline int queue_discard_alignment(struct request_queue *q)
140886b37281SMartin K. Petersen {
140986b37281SMartin K. Petersen 	if (q->limits.discard_misaligned)
141086b37281SMartin K. Petersen 		return -1;
141186b37281SMartin K. Petersen 
141286b37281SMartin K. Petersen 	return q->limits.discard_alignment;
141386b37281SMartin K. Petersen }
141486b37281SMartin K. Petersen 
1415e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
141686b37281SMartin K. Petersen {
141759771079SLinus Torvalds 	unsigned int alignment, granularity, offset;
1418dd3d145dSMartin K. Petersen 
1419a934a00aSMartin K. Petersen 	if (!lim->max_discard_sectors)
1420a934a00aSMartin K. Petersen 		return 0;
1421a934a00aSMartin K. Petersen 
142259771079SLinus Torvalds 	/* Why are these in bytes, not sectors? */
142359771079SLinus Torvalds 	alignment = lim->discard_alignment >> 9;
142459771079SLinus Torvalds 	granularity = lim->discard_granularity >> 9;
142559771079SLinus Torvalds 	if (!granularity)
142659771079SLinus Torvalds 		return 0;
142759771079SLinus Torvalds 
142859771079SLinus Torvalds 	/* Offset of the partition start in 'granularity' sectors */
142959771079SLinus Torvalds 	offset = sector_div(sector, granularity);
143059771079SLinus Torvalds 
143159771079SLinus Torvalds 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
143259771079SLinus Torvalds 	offset = (granularity + alignment - offset) % granularity;
143359771079SLinus Torvalds 
143459771079SLinus Torvalds 	/* Turn it back into bytes, gaah */
143559771079SLinus Torvalds 	return offset << 9;
143686b37281SMartin K. Petersen }
143786b37281SMartin K. Petersen 
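/*
 * Editor's worked example for queue_limit_discard_alignment() above,
 * assuming discard_granularity = 1 MiB (2048 sectors) and
 * discard_alignment = 512 KiB (1024 sectors):
 *
 *   start sector 3072: offset = 3072 % 2048 = 1024
 *                      (2048 + 1024 - 1024) % 2048 = 0       -> aligned
 *   start sector 2048: offset = 2048 % 2048 = 0
 *                      (2048 + 1024 - 0) % 2048 = 1024       -> 1024 << 9
 *
 * i.e. a partition starting at sector 2048 sits 512 KiB short of the next
 * properly aligned discard boundary.
 */
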
1438c6e66634SPaolo Bonzini static inline int bdev_discard_alignment(struct block_device *bdev)
1439c6e66634SPaolo Bonzini {
1440c6e66634SPaolo Bonzini 	struct request_queue *q = bdev_get_queue(bdev);
1441c6e66634SPaolo Bonzini 
1442c6e66634SPaolo Bonzini 	if (bdev != bdev->bd_contains)
1443c6e66634SPaolo Bonzini 		return bdev->bd_part->discard_alignment;
1444c6e66634SPaolo Bonzini 
1445c6e66634SPaolo Bonzini 	return q->limits.discard_alignment;
1446c6e66634SPaolo Bonzini }
1447c6e66634SPaolo Bonzini 
144898262f27SMartin K. Petersen static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
144998262f27SMartin K. Petersen {
1450a934a00aSMartin K. Petersen 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
145198262f27SMartin K. Petersen 		return 1;
145298262f27SMartin K. Petersen 
145398262f27SMartin K. Petersen 	return 0;
145498262f27SMartin K. Petersen }
145598262f27SMartin K. Petersen 
145698262f27SMartin K. Petersen static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
145798262f27SMartin K. Petersen {
145898262f27SMartin K. Petersen 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
145998262f27SMartin K. Petersen }
146098262f27SMartin K. Petersen 
14614363ac7cSMartin K. Petersen static inline unsigned int bdev_write_same(struct block_device *bdev)
14624363ac7cSMartin K. Petersen {
14634363ac7cSMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
14644363ac7cSMartin K. Petersen 
14654363ac7cSMartin K. Petersen 	if (q)
14664363ac7cSMartin K. Petersen 		return q->limits.max_write_same_sectors;
14674363ac7cSMartin K. Petersen 
14684363ac7cSMartin K. Petersen 	return 0;
14694363ac7cSMartin K. Petersen }
14704363ac7cSMartin K. Petersen 
1471797476b8SDamien Le Moal static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1472797476b8SDamien Le Moal {
1473797476b8SDamien Le Moal 	struct request_queue *q = bdev_get_queue(bdev);
1474797476b8SDamien Le Moal 
1475797476b8SDamien Le Moal 	if (q)
1476797476b8SDamien Le Moal 		return blk_queue_zoned_model(q);
1477797476b8SDamien Le Moal 
1478797476b8SDamien Le Moal 	return BLK_ZONED_NONE;
1479797476b8SDamien Le Moal }
1480797476b8SDamien Le Moal 
1481797476b8SDamien Le Moal static inline bool bdev_is_zoned(struct block_device *bdev)
1482797476b8SDamien Le Moal {
1483797476b8SDamien Le Moal 	struct request_queue *q = bdev_get_queue(bdev);
1484797476b8SDamien Le Moal 
1485797476b8SDamien Le Moal 	if (q)
1486797476b8SDamien Le Moal 		return blk_queue_is_zoned(q);
1487797476b8SDamien Le Moal 
1488797476b8SDamien Le Moal 	return false;
1489797476b8SDamien Le Moal }
1490797476b8SDamien Le Moal 
14916a0cb1bcSHannes Reinecke static inline unsigned int bdev_zone_size(struct block_device *bdev)
14926a0cb1bcSHannes Reinecke {
14936a0cb1bcSHannes Reinecke 	struct request_queue *q = bdev_get_queue(bdev);
14946a0cb1bcSHannes Reinecke 
14956a0cb1bcSHannes Reinecke 	if (q)
14966a0cb1bcSHannes Reinecke 		return blk_queue_zone_size(q);
14976a0cb1bcSHannes Reinecke 
14986a0cb1bcSHannes Reinecke 	return 0;
14996a0cb1bcSHannes Reinecke }
15006a0cb1bcSHannes Reinecke 
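/*
 * Editor's sketch: the zoned-device helpers above in use. A conventional
 * disk reports BLK_ZONED_NONE and a zone size of 0.
 */
static bool my_check_zoned(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return false;

	/* bdev_zone_size() is expressed in 512-byte sectors */
	pr_info("zoned model %d, %u sectors per zone\n",
		bdev_zoned_model(bdev), bdev_zone_size(bdev));
	return true;
}
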
1501165125e1SJens Axboe static inline int queue_dma_alignment(struct request_queue *q)
15021da177e4SLinus Torvalds {
1503482eb689SPete Wyckoff 	return q ? q->dma_alignment : 511;
15041da177e4SLinus Torvalds }
15051da177e4SLinus Torvalds 
150614417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
150787904074SFUJITA Tomonori 				 unsigned int len)
150887904074SFUJITA Tomonori {
150987904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
151014417799SNamhyung Kim 	return !(addr & alignment) && !(len & alignment);
151187904074SFUJITA Tomonori }
151287904074SFUJITA Tomonori 
15131da177e4SLinus Torvalds /* assumes size > 256 */
15141da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
15151da177e4SLinus Torvalds {
15161da177e4SLinus Torvalds 	unsigned int bits = 8;
15171da177e4SLinus Torvalds 	do {
15181da177e4SLinus Torvalds 		bits++;
15191da177e4SLinus Torvalds 		size >>= 1;
15201da177e4SLinus Torvalds 	} while (size > 256);
15211da177e4SLinus Torvalds 	return bits;
15221da177e4SLinus Torvalds }
15231da177e4SLinus Torvalds 
15242befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev)
15251da177e4SLinus Torvalds {
15261da177e4SLinus Torvalds 	return bdev->bd_block_size;
15271da177e4SLinus Torvalds }
15281da177e4SLinus Torvalds 
1529f3876930S[email protected] static inline bool queue_flush_queueable(struct request_queue *q)
1530f3876930S[email protected] {
1531c888a8f9SJens Axboe 	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
1532f3876930S[email protected] }
1533f3876930S[email protected] 
15341da177e4SLinus Torvalds typedef struct {struct page *v;} Sector;
15351da177e4SLinus Torvalds 
15361da177e4SLinus Torvalds unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
15371da177e4SLinus Torvalds 
15381da177e4SLinus Torvalds static inline void put_dev_sector(Sector p)
15391da177e4SLinus Torvalds {
154009cbfeafSKirill A. Shutemov 	put_page(p.v);
15411da177e4SLinus Torvalds }
15421da177e4SLinus Torvalds 
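/*
 * Editor's note on blk_rq_aligned() above: it is a pure mask test, so both
 * the buffer address and the length must have no bits set inside
 * queue_dma_alignment(q) | q->dma_pad_mask. With the default mask of 511,
 * a 1024-byte buffer at address 0x1000 passes (0x1000 & 511 == 0 and
 * 1024 & 511 == 0), while the same buffer at 0x1004, or with a 1000-byte
 * length, fails and the data must be bounced.
 */
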
1543e0af2917SMing Lei static inline bool __bvec_gap_to_prev(struct request_queue *q,
1544e0af2917SMing Lei 				struct bio_vec *bprv, unsigned int offset)
1545e0af2917SMing Lei {
1546e0af2917SMing Lei 	return offset ||
1547e0af2917SMing Lei 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1548e0af2917SMing Lei }
1549e0af2917SMing Lei 
155003100aadSKeith Busch /*
155103100aadSKeith Busch  * Check if adding a bio_vec after bprv with offset would create a gap in
155203100aadSKeith Busch  * the SG list. Most drivers don't care about this, but some do.
155303100aadSKeith Busch  */
155403100aadSKeith Busch static inline bool bvec_gap_to_prev(struct request_queue *q,
155503100aadSKeith Busch 				struct bio_vec *bprv, unsigned int offset)
155603100aadSKeith Busch {
155703100aadSKeith Busch 	if (!queue_virt_boundary(q))
155803100aadSKeith Busch 		return false;
1559e0af2917SMing Lei 	return __bvec_gap_to_prev(q, bprv, offset);
156003100aadSKeith Busch }
156103100aadSKeith Busch 
15625e7c4274SJens Axboe static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
15635e7c4274SJens Axboe 			 struct bio *next)
15645e7c4274SJens Axboe {
156525e71a99SMing Lei 	if (bio_has_data(prev) && queue_virt_boundary(q)) {
156625e71a99SMing Lei 		struct bio_vec pb, nb;
15675e7c4274SJens Axboe 
156825e71a99SMing Lei 		bio_get_last_bvec(prev, &pb);
156925e71a99SMing Lei 		bio_get_first_bvec(next, &nb);
157025e71a99SMing Lei 
157125e71a99SMing Lei 		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
157225e71a99SMing Lei 	}
157325e71a99SMing Lei 
157425e71a99SMing Lei 	return false;
15755e7c4274SJens Axboe }
15765e7c4274SJens Axboe 
15775e7c4274SJens Axboe static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
15785e7c4274SJens Axboe {
15795e7c4274SJens Axboe 	return bio_will_gap(req->q, req->biotail, bio);
15805e7c4274SJens Axboe }
15815e7c4274SJens Axboe 
15825e7c4274SJens Axboe static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
15835e7c4274SJens Axboe {
15845e7c4274SJens Axboe 	return bio_will_gap(req->q, bio, req->bio);
15855e7c4274SJens Axboe }
15865e7c4274SJens Axboe 
158759c3d45eSJens Axboe int kblockd_schedule_work(struct work_struct *work);
1588ee63cfa7SJens Axboe int kblockd_schedule_work_on(int cpu, struct work_struct *work);
158959c3d45eSJens Axboe int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
15908ab14595SJens Axboe int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
15911da177e4SLinus Torvalds 
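/*
 * Editor's worked example for bvec_gap_to_prev() above, assuming an
 * NVMe-style queue_virt_boundary() of 4095: a previous bvec with
 * bv_offset 0 and bv_len 4096 followed by a bvec at offset 0 creates no
 * gap ((0 + 4096) & 4095 == 0 and the next offset is 0), but a next bvec
 * starting at offset 512 does, so the two bios must not be merged into
 * one request.
 */
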
15929195291eSDivyesh Shah #ifdef CONFIG_BLK_CGROUP
159328f4197eSJens Axboe /*
159428f4197eSJens Axboe  * This should not be using sched_clock(). A real patch is in progress
159528f4197eSJens Axboe  * to fix this up, until that is in place we need to disable preemption
159628f4197eSJens Axboe  * around sched_clock() in this function and set_io_start_time_ns().
159728f4197eSJens Axboe  */
15989195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req)
15999195291eSDivyesh Shah {
160028f4197eSJens Axboe 	preempt_disable();
16019195291eSDivyesh Shah 	req->start_time_ns = sched_clock();
160228f4197eSJens Axboe 	preempt_enable();
16039195291eSDivyesh Shah }
16049195291eSDivyesh Shah 
16059195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req)
16069195291eSDivyesh Shah {
160728f4197eSJens Axboe 	preempt_disable();
16089195291eSDivyesh Shah 	req->io_start_time_ns = sched_clock();
160928f4197eSJens Axboe 	preempt_enable();
16109195291eSDivyesh Shah }
161184c124daSDivyesh Shah 
161284c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
161384c124daSDivyesh Shah {
161484c124daSDivyesh Shah 	return req->start_time_ns;
161584c124daSDivyesh Shah }
161684c124daSDivyesh Shah 
161784c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
161884c124daSDivyesh Shah {
161984c124daSDivyesh Shah 	return req->io_start_time_ns;
162084c124daSDivyesh Shah }
16219195291eSDivyesh Shah #else
16229195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req) {}
16239195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req) {}
162484c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
162584c124daSDivyesh Shah {
162684c124daSDivyesh Shah 	return 0;
162784c124daSDivyesh Shah }
162884c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
162984c124daSDivyesh Shah {
163084c124daSDivyesh Shah 	return 0;
163184c124daSDivyesh Shah }
16329195291eSDivyesh Shah #endif
16339195291eSDivyesh Shah 
16341da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
16351da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
16361da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
16371da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
16381da177e4SLinus Torvalds 
16397ba1ba12SMartin K. Petersen #if defined(CONFIG_BLK_DEV_INTEGRITY)
16407ba1ba12SMartin K. Petersen 
16418288f496SMartin K. Petersen enum blk_integrity_flags {
16428288f496SMartin K. Petersen 	BLK_INTEGRITY_VERIFY		= 1 << 0,
16438288f496SMartin K. Petersen 	BLK_INTEGRITY_GENERATE		= 1 << 1,
16443aec2f41SMartin K. Petersen 	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
1645aae7df50SMartin K. Petersen 	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
16468288f496SMartin K. Petersen };
16477ba1ba12SMartin K. Petersen 
164818593088SMartin K. Petersen struct blk_integrity_iter {
16497ba1ba12SMartin K. Petersen 	void			*prot_buf;
16507ba1ba12SMartin K. Petersen 	void			*data_buf;
16513be91c4aSMartin K. Petersen 	sector_t		seed;
16527ba1ba12SMartin K. Petersen 	unsigned int		data_size;
16533be91c4aSMartin K. Petersen 	unsigned short		interval;
16547ba1ba12SMartin K. Petersen 	const char		*disk_name;
16557ba1ba12SMartin K. Petersen };
16567ba1ba12SMartin K. Petersen 
165718593088SMartin K. Petersen typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
16587ba1ba12SMartin K. Petersen 
16590f8087ecSMartin K. Petersen struct blk_integrity_profile {
166018593088SMartin K. Petersen 	integrity_processing_fn	*generate_fn;
166118593088SMartin K. Petersen 	integrity_processing_fn	*verify_fn;
16620f8087ecSMartin K. Petersen 	const char		*name;
16630f8087ecSMartin K. Petersen };
16647ba1ba12SMartin K. Petersen 
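/*
 * Editor's sketch: the shape of a blk_integrity_profile built on the types
 * above. The checksum logic is elided; "my-prot" and both callbacks are
 * hypothetical.
 */
static int my_prot_generate(struct blk_integrity_iter *iter)
{
	/* fill iter->prot_buf from iter->data_buf, one tuple per
	 * iter->interval bytes, iter->data_size bytes in total */
	return 0;
}

static int my_prot_verify(struct blk_integrity_iter *iter)
{
	/* recompute and compare; a nonzero return fails the I/O */
	return 0;
}

static const struct blk_integrity_profile my_prot_profile = {
	.name		= "my-prot",
	.generate_fn	= my_prot_generate,
	.verify_fn	= my_prot_verify,
};
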
166525520d55SMartin K. Petersen extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
16667ba1ba12SMartin K. Petersen extern void blk_integrity_unregister(struct gendisk *);
1667ad7fce93SMartin K. Petersen extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
166813f05c8dSMartin K. Petersen extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
166913f05c8dSMartin K. Petersen 				   struct scatterlist *);
167013f05c8dSMartin K. Petersen extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
16714eaf99beSMartin K. Petersen extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
167213f05c8dSMartin K. Petersen 				   struct request *);
16734eaf99beSMartin K. Petersen extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
167413f05c8dSMartin K. Petersen 				    struct bio *);
16757ba1ba12SMartin K. Petersen 
167625520d55SMartin K. Petersen static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
167725520d55SMartin K. Petersen {
1678ac6fc48cSDan Williams 	struct blk_integrity *bi = &disk->queue->integrity;
167925520d55SMartin K. Petersen 
168025520d55SMartin K. Petersen 	if (!bi->profile)
168125520d55SMartin K. Petersen 		return NULL;
168225520d55SMartin K. Petersen 
168325520d55SMartin K. Petersen 	return bi;
168425520d55SMartin K. Petersen }
168525520d55SMartin K. Petersen 
1686b04accc4SJens Axboe static inline
1687b04accc4SJens Axboe struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1688b04accc4SJens Axboe {
168925520d55SMartin K. Petersen 	return blk_get_integrity(bdev->bd_disk);
1690b02739b0SMartin K. Petersen }
1691b02739b0SMartin K. Petersen 
1692180b2f95SMartin K. Petersen static inline bool blk_integrity_rq(struct request *rq)
16937ba1ba12SMartin K. Petersen {
1694180b2f95SMartin K. Petersen 	return rq->cmd_flags & REQ_INTEGRITY;
16957ba1ba12SMartin K. Petersen }
16967ba1ba12SMartin K. Petersen 
169713f05c8dSMartin K. Petersen static inline void blk_queue_max_integrity_segments(struct request_queue *q,
169813f05c8dSMartin K. Petersen 						    unsigned int segs)
169913f05c8dSMartin K. Petersen {
170013f05c8dSMartin K. Petersen 	q->limits.max_integrity_segments = segs;
170113f05c8dSMartin K. Petersen }
170213f05c8dSMartin K. Petersen 
170313f05c8dSMartin K. Petersen static inline unsigned short
170413f05c8dSMartin K. Petersen queue_max_integrity_segments(struct request_queue *q)
170513f05c8dSMartin K. Petersen {
170613f05c8dSMartin K. Petersen 	return q->limits.max_integrity_segments;
170713f05c8dSMartin K. Petersen }
170813f05c8dSMartin K. Petersen 
17097f39add3SSagi Grimberg static inline bool integrity_req_gap_back_merge(struct request *req,
17107f39add3SSagi Grimberg 						struct bio *next)
17117f39add3SSagi Grimberg {
17127f39add3SSagi Grimberg 	struct bio_integrity_payload *bip = bio_integrity(req->bio);
17137f39add3SSagi Grimberg 	struct bio_integrity_payload *bip_next = bio_integrity(next);
17147f39add3SSagi Grimberg 
17157f39add3SSagi Grimberg 	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
17167f39add3SSagi Grimberg 				bip_next->bip_vec[0].bv_offset);
17177f39add3SSagi Grimberg }
17187f39add3SSagi Grimberg 
17197f39add3SSagi Grimberg static inline bool integrity_req_gap_front_merge(struct request *req,
17207f39add3SSagi Grimberg 						 struct bio *bio)
17217f39add3SSagi Grimberg {
17227f39add3SSagi Grimberg 	struct bio_integrity_payload *bip = bio_integrity(bio);
17237f39add3SSagi Grimberg 	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
17247f39add3SSagi Grimberg 
17257f39add3SSagi Grimberg 	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
17267f39add3SSagi Grimberg 				bip_next->bip_vec[0].bv_offset);
17277f39add3SSagi Grimberg }
17287f39add3SSagi Grimberg 
17297ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */
17307ba1ba12SMartin K. Petersen 
1731fd83240aSStephen Rothwell struct bio;
1732fd83240aSStephen Rothwell struct block_device;
1733fd83240aSStephen Rothwell struct gendisk;
1734fd83240aSStephen Rothwell struct blk_integrity;
1735fd83240aSStephen Rothwell 
1736fd83240aSStephen Rothwell static inline int blk_integrity_rq(struct request *rq)
1737fd83240aSStephen Rothwell {
1738fd83240aSStephen Rothwell 	return 0;
1739fd83240aSStephen Rothwell }
1740fd83240aSStephen Rothwell static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1741fd83240aSStephen Rothwell 					    struct bio *b)
1742fd83240aSStephen Rothwell {
1743fd83240aSStephen Rothwell 	return 0;
1744fd83240aSStephen Rothwell }
1745fd83240aSStephen Rothwell static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1746fd83240aSStephen Rothwell 					  struct bio *b,
1747fd83240aSStephen Rothwell 					  struct scatterlist *s)
1748fd83240aSStephen Rothwell {
1749fd83240aSStephen Rothwell 	return 0;
1750fd83240aSStephen Rothwell }
1751fd83240aSStephen Rothwell static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1752fd83240aSStephen Rothwell {
175361a04e5bSMichele Curti 	return NULL;
1754fd83240aSStephen Rothwell }
1755fd83240aSStephen Rothwell static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1756fd83240aSStephen Rothwell {
1757fd83240aSStephen Rothwell 	return NULL;
1758fd83240aSStephen Rothwell }
1759fd83240aSStephen Rothwell static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1760fd83240aSStephen Rothwell {
1761fd83240aSStephen Rothwell 	return 0;
1762fd83240aSStephen Rothwell }
176325520d55SMartin K. Petersen static inline void blk_integrity_register(struct gendisk *d,
1764fd83240aSStephen Rothwell 					  struct blk_integrity *b)
1765fd83240aSStephen Rothwell {
1766fd83240aSStephen Rothwell }
1767fd83240aSStephen Rothwell static inline void blk_integrity_unregister(struct gendisk *d)
1768fd83240aSStephen Rothwell {
1769fd83240aSStephen Rothwell }
1770fd83240aSStephen Rothwell static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1771fd83240aSStephen Rothwell 						    unsigned int segs)
1772fd83240aSStephen Rothwell {
1773fd83240aSStephen Rothwell }
1774fd83240aSStephen Rothwell static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1775fd83240aSStephen Rothwell {
1776fd83240aSStephen Rothwell 	return 0;
1777fd83240aSStephen Rothwell }
17784eaf99beSMartin K. Petersen static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1779fd83240aSStephen Rothwell 					  struct request *r1,
1780fd83240aSStephen Rothwell 					  struct request *r2)
1781fd83240aSStephen Rothwell {
1782cb1a5ab6SMartin K. Petersen 	return true;
1783fd83240aSStephen Rothwell }
17844eaf99beSMartin K. Petersen static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1785fd83240aSStephen Rothwell 					   struct request *r,
1786fd83240aSStephen Rothwell 					   struct bio *b)
1787fd83240aSStephen Rothwell {
1788cb1a5ab6SMartin K. Petersen 	return true;
1789fd83240aSStephen Rothwell }
179025520d55SMartin K. Petersen 
17917f39add3SSagi Grimberg static inline bool integrity_req_gap_back_merge(struct request *req,
17927f39add3SSagi Grimberg 						struct bio *next)
17937f39add3SSagi Grimberg {
17947f39add3SSagi Grimberg 	return false;
17957f39add3SSagi Grimberg }
17967f39add3SSagi Grimberg static inline bool integrity_req_gap_front_merge(struct request *req,
17977f39add3SSagi Grimberg 						 struct bio *bio)
17987f39add3SSagi Grimberg {
17997f39add3SSagi Grimberg 	return false;
18007f39add3SSagi Grimberg }
18017ba1ba12SMartin K. Petersen 
18027ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */
18037ba1ba12SMartin K. Petersen 
1804b2e0d162SDan Williams /**
1805b2e0d162SDan Williams  * struct blk_dax_ctl - control and output parameters for ->direct_access
1806b2e0d162SDan Williams  * @sector: (input) offset relative to a block_device
1807b2e0d162SDan Williams  * @addr: (output) kernel virtual address for @sector populated by driver
1808b2e0d162SDan Williams  * @pfn: (output) page frame number for @addr populated by driver
1809b2e0d162SDan Williams  * @size: (input) number of bytes requested
1810b2e0d162SDan Williams  */
1811b2e0d162SDan Williams struct blk_dax_ctl {
1812b2e0d162SDan Williams 	sector_t sector;
18137a9eb206SDan Williams 	void *addr;
1814b2e0d162SDan Williams 	long size;
181534c0fd54SDan Williams 	pfn_t pfn;
1816b2e0d162SDan Williams };
1817b2e0d162SDan Williams 
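/*
 * Editor's sketch: using struct blk_dax_ctl with bdev_direct_access()
 * (declared further down). The return value is the number of bytes
 * addressable at dax.addr, which may be less than the requested size.
 */
static long my_dax_map(struct block_device *bdev, sector_t sector,
		       long size, void **kaddr, pfn_t *pfn)
{
	struct blk_dax_ctl dax = {
		.sector	= sector,	/* input */
		.size	= size,		/* input */
	};
	long avail = bdev_direct_access(bdev, &dax);	/* fills addr and pfn */

	if (avail < 0)
		return avail;
	*kaddr = dax.addr;
	*pfn = dax.pfn;
	return avail;
}
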
181808f85851SAl Viro struct block_device_operations {
1819d4430d62SAl Viro 	int (*open) (struct block_device *, fmode_t);
1820db2a144bSAl Viro 	void (*release) (struct gendisk *, fmode_t);
1821c11f0c0bSJens Axboe 	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
1822d4430d62SAl Viro 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1823d4430d62SAl Viro 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
18247a9eb206SDan Williams 	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
18257a9eb206SDan Williams 			long);
182677ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
182777ea887eSTejun Heo 				      unsigned int clearing);
182877ea887eSTejun Heo 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
182908f85851SAl Viro 	int (*media_changed) (struct gendisk *);
1830c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
183108f85851SAl Viro 	int (*revalidate_disk) (struct gendisk *);
183208f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1833b3a27d05SNitin Gupta 	/* this callback is with swap_lock and sometimes page table lock held */
1834b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
183508f85851SAl Viro 	struct module *owner;
1836bbd3e064SChristoph Hellwig 	const struct pr_ops *pr_ops;
183708f85851SAl Viro };
183808f85851SAl Viro 
1839633a08b8SAl Viro extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1840633a08b8SAl Viro 				 unsigned long);
184147a191fdSMatthew Wilcox extern int bdev_read_page(struct block_device *, sector_t, struct page *);
184247a191fdSMatthew Wilcox extern int bdev_write_page(struct block_device *, sector_t, struct page *,
184347a191fdSMatthew Wilcox 						struct writeback_control *);
1844b2e0d162SDan Williams extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
18452d96afc8SToshi Kani extern int bdev_dax_supported(struct super_block *, int);
1846a8078b1fSToshi Kani extern bool bdev_dax_capable(struct block_device *);
18479361401eSDavid Howells #else /* CONFIG_BLOCK */
1848ac13a829SFabian Frederick 
1849ac13a829SFabian Frederick struct block_device;
1850ac13a829SFabian Frederick 
18519361401eSDavid Howells /*
18529361401eSDavid Howells  * stubs for when the block layer is configured out
18539361401eSDavid Howells  */
18549361401eSDavid Howells #define buffer_heads_over_limit 0
18559361401eSDavid Howells 
18569361401eSDavid Howells static inline long nr_blockdev_pages(void)
18579361401eSDavid Howells {
18589361401eSDavid Howells 	return 0;
18599361401eSDavid Howells }
18609361401eSDavid Howells 
18611f940bdfSJens Axboe struct blk_plug {
18621f940bdfSJens Axboe };
18631f940bdfSJens Axboe 
18641f940bdfSJens Axboe static inline void blk_start_plug(struct blk_plug *plug)
186573c10101SJens Axboe {
186673c10101SJens Axboe }
186773c10101SJens Axboe 
18681f940bdfSJens Axboe static inline void blk_finish_plug(struct blk_plug *plug)
186973c10101SJens Axboe {
187073c10101SJens Axboe }
187173c10101SJens Axboe 
18721f940bdfSJens Axboe static inline void blk_flush_plug(struct task_struct *task)
187373c10101SJens Axboe {
187473c10101SJens Axboe }
187573c10101SJens Axboe 
1876a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *task)
1877a237c1c5SJens Axboe {
1878a237c1c5SJens Axboe }
1879a237c1c5SJens Axboe 
1880a237c1c5SJens Axboe 
188173c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk)
188273c10101SJens Axboe {
188373c10101SJens Axboe 	return false;
188473c10101SJens Axboe }
188573c10101SJens Axboe 
1886ac13a829SFabian Frederick static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1887ac13a829SFabian Frederick 				     sector_t *error_sector)
1888ac13a829SFabian Frederick {
1889ac13a829SFabian Frederick 	return 0;
1890ac13a829SFabian Frederick }
1891ac13a829SFabian Frederick 
18929361401eSDavid Howells #endif /* CONFIG_BLOCK */
18939361401eSDavid Howells 
18941da177e4SLinus Torvalds #endif