#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		unsigned long fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	u64 cmd_flags;
	unsigned cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to allocate them dynamically.  Flush requests are
	 * never put on the IO scheduler, so let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
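
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * the queue_flag_set()/queue_flag_clear() variants above assert via
 * lockdep that ->queue_lock is held, so a runtime flag flip would
 * normally look like the example below.  The _unlocked variants are for
 * init paths where the queue is not yet visible to anyone else.
 */
static inline void example_mark_queue_nonrot(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NONROT, q);	/* lockdep-checked variant */
	spin_unlock_irqrestore(q->queue_lock, flags);
}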

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of a dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))

/*
 * A driver can handle struct request if it either has an old-style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}
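
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * rq_data_dir() folds the REQ_WRITE bit down to 0 (READ) or 1 (WRITE),
 * which is why per-direction counters such as the in_flight[2] pair in
 * struct hd_struct are simply indexed with it.
 */
static inline const char *example_rq_dir_name(const struct request *rq)
{
	static const char * const name[2] = { "read", "write" };

	return name[rq_data_dir(rq)];
}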

/*
 * We regard a request as sync if it is either a read or a sync write
 * (i.e. a write with REQ_SYNC set).
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}
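
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * rw_is_sync() is what decides which slot of the per-list count[],
 * starved[] and wait[] arrays a request uses.  BLK_RW_SYNC/BLK_RW_ASYNC
 * are assumed to come from <linux/backing-dev-defs.h>.
 */
static inline int example_rl_index(unsigned int rw_flags)
{
	return rw_is_sync(rw_flags) ? BLK_RW_SYNC : BLK_RW_ASYNC;
}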

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
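
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * walking every segment of a request with rq_for_each_segment().  Since
 * the bvec_iter conversion the iteration yields each bio_vec by value.
 */
static inline unsigned int example_rq_segment_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;	/* length of this segment in bytes */

	return bytes;
}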

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
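
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * drivers should report progress through the accessors above rather than
 * peeking at the internal __sector/__data_len fields directly.
 */
static inline void example_dump_rq_progress(const struct request *rq)
{
	printk(KERN_DEBUG "rq at sector %llu: %u bytes (%u sectors) left\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_bytes(rq), blk_rq_sectors(rq));
}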

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return the maximum size of a request at a given offset.  Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
		return blk_queue_get_max_sectors(q, rq->cmd_flags);

	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
			blk_queue_get_max_sectors(q, rq->cmd_flags));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}
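
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * worked example of the chunk arithmetic in blk_max_size_offset().  With
 * chunk_sectors == 256, an offset of sector 300 sits 300 & 255 = 44
 * sectors into its chunk, so at most 256 - 44 = 212 sectors fit before
 * the chunk boundary.
 */
static inline unsigned int example_max_bytes_at(struct request_queue *q,
						sector_t offset)
{
	return blk_max_size_offset(q, offset) << 9;	/* sectors -> bytes */
}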

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
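
/*
 * Illustrative sketch (hypothetical, error handling abbreviated, not part
 * of the kernel API): how a simple bio-based driver might combine the
 * allocation and property setters above.  The make_request_fn callback
 * and the 255-sector cap are placeholders chosen for the example.
 */
static inline struct request_queue *example_alloc_bio_queue(make_request_fn *mfn)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;
	blk_queue_make_request(q, mfn);		/* bio-based, bypasses the elevator */
	blk_queue_max_hw_sectors(q, 255);	/* conservative transfer cap */
	blk_queue_logical_block_size(q, 512);
	return q;
}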

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
#endif
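
/*
 * Illustrative sketch (hypothetical driver callback, not part of the
 * kernel API): the intended pairing of the runtime-pm helpers above.
 * blk_pre_runtime_suspend() is assumed to fail while requests are still
 * pending, in which case the device must stay powered.
 */
static inline int example_runtime_suspend(struct request_queue *q)
{
	int err = blk_pre_runtime_suspend(q);

	if (err)
		return err;		/* queue still busy, abort suspend */
	/* ... power down the hardware here ... */
	blk_post_runtime_suspend(q, 0);	/* 0: suspend succeeded */
	return 0;
}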

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}
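
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * batching several submissions under one plug so the block layer can
 * merge them and push them to the device in one go when the plug is
 * finished (or earlier if the task sleeps).
 */
static inline void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);	/* held back on the plug */
	blk_finish_plug(&plug);			/* flush the batch */
}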

/*
 * tagged command queueing
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int,
                               struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                                                     int tag)
{
        if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
                return NULL;

        return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev,
                                    block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, true);
}
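
/*
 * The (s_blocksize_bits - 9) shifts above convert filesystem blocks to
 * 512-byte sectors.  Worked example with illustrative values: for 4KiB
 * blocks, s_blocksize_bits is 12, so each block spans 8 sectors and
 *
 *      sb_issue_discard(sb, 100, 25, GFP_NOFS, 0);
 *
 * becomes blkdev_issue_discard(sb->s_bdev, 800, 200, GFP_NOFS, 0).
 */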

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
        BLK_DEF_MAX_SECTORS     = 2560,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
        return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
        return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
        return q->limits.max_segment_size;
}
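
/*
 * Note that the sector-based limits above are expressed in 512-byte
 * units regardless of the queue's logical block size, so the largest
 * request a queue accepts, in bytes, is:
 *
 *      unsigned int max_bytes = queue_max_sectors(q) << 9;
 */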

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
        int retval = 512;

        if (q && q->limits.logical_block_size)
                retval = q->limits.logical_block_size;

        return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
        return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
        return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
        if (q->limits.misaligned)
                return -1;

        return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim,
                                               sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}
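
/*
 * Worked example for queue_limit_alignment_offset(), with illustrative
 * values: physical_block_size = 4096, io_min = 0 and alignment_offset = 0,
 * so granularity is 4096 bytes (8 sectors).  For sector = 7 the remainder
 * is 7 sectors, i.e. alignment = 3584 bytes, and the result is
 * (4096 + 0 - 3584) % 4096 = 512: sector 7 sits 512 bytes short of the
 * next aligned boundary, while a properly aligned sector returns 0.
 */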

static inline int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.misaligned)
                return -1;

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->alignment_offset;

        return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
        if (q->limits.discard_misaligned)
                return -1;

        return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim,
                                                sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> 9;
        granularity = lim->discard_granularity >> 9;
        if (!granularity)
                return 0;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << 9;
}
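
/*
 * Worked example for queue_limit_discard_alignment(), with illustrative
 * values (and max_discard_sectors nonzero): discard_granularity = 4096
 * bytes (8 sectors), discard_alignment = 0.  A discard starting at
 * sector 10 lies 2 sectors past a granule boundary, so the result is
 * ((8 + 0 - 2) % 8) << 9 = 3072 bytes: the distance to the next granule,
 * which begins at sector 16.
 */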

static inline int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->discard_alignment;

        return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
        if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
                return 1;

        return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
        return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                return q->limits.max_write_same_sectors;

        return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
        return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
{
        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        return !(addr & alignment) && !(len & alignment);
}

/* returns log2 of a power-of-two block size; assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
        unsigned int bits = 8;
        do {
                bits++;
                size >>= 1;
        } while (size > 256);
        return bits;
}
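
/*
 * Examples: blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12.
 */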

static inline unsigned int block_size(struct block_device *bdev)
{
        return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
        return !q->flush_not_queueable;
}

typedef struct { struct page *v; } Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
        page_cache_release(p.v);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list.  Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
                                    struct bio_vec *bprv, unsigned int offset)
{
        if (!queue_virt_boundary(q))
                return false;
        return offset ||
                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
                                struct bio *next)
{
        if (!bio_has_data(prev))
                return false;

        return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
                                next->bi_io_vec[0].bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, bio, req->bio);
}
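
/*
 * Example (illustrative): a driver that sets a 4KiB virtual boundary
 * (virt_boundary_mask = 0xfff, as NVMe does for its PRP lists) only
 * allows the merges above when the appended bio's first bio_vec starts
 * at offset zero and the preceding bio's last bio_vec ends exactly on a
 * 4KiB boundary; anything else is treated as a gap.
 */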

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(); a real patch is in progress
 * to fix this up.  Until that is in place we need to disable preemption
 * around sched_clock() here and in set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
        preempt_disable();
        req->start_time_ns = sched_clock();
        preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
        preempt_disable();
        req->io_start_time_ns = sched_clock();
        preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
        return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
        return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
        MODULE_ALIAS("block-major-" __stringify(major) "-*")
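
/*
 * Expansion example (illustrative major number): a driver owning block
 * major 7 would write MODULE_ALIAS_BLOCKDEV_MAJOR(7), which expands to
 * MODULE_ALIAS("block-major-7-*") so the module can be loaded on demand
 * for any minor of that major.
 */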

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
        BLK_INTEGRITY_VERIFY            = 1 << 0,
        BLK_INTEGRITY_GENERATE          = 1 << 1,
        BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
        BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
};

struct blk_integrity_iter {
        void                    *prot_buf;
        void                    *data_buf;
        sector_t                seed;
        unsigned int            data_size;
        unsigned short          interval;
        const char              *disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
        integrity_processing_fn *generate_fn;
        integrity_processing_fn *verify_fn;
        const char              *name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
                                   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
                                   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                                    struct bio *);
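
/*
 * Registration sketch (hypothetical driver code; example_generate,
 * example_verify and the profile name are made up for illustration):
 *
 *      static struct blk_integrity_profile example_profile = {
 *              .name           = "EXAMPLE-CSUM",
 *              .generate_fn    = example_generate,
 *              .verify_fn      = example_verify,
 *      };
 *
 *      struct blk_integrity bi = {
 *              .profile        = &example_profile,
 *              .flags          = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
 *      };
 *
 *      blk_integrity_register(disk, &bi);
 */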

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        struct blk_integrity *bi = &disk->integrity;

        if (!bi->profile)
                return NULL;

        return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
        return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
        return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
        q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
        return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
{
        struct bio_integrity_payload *bip = bio_integrity(req->bio);
        struct bio_integrity_payload *bip_next = bio_integrity(next);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
                                                 struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
        return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
                                            struct bio *b)
{
        return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
                                          struct bio *b,
                                          struct scatterlist *s)
{
        return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
        return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
        return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
                                          struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
        return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                                          struct request *r1,
                                          struct request *r2)
{
        return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                                           struct request *r,
                                           struct bio *b)
{
        return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
{
        return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
                                                 struct bio *bio)
{
        return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        void (*release) (struct gendisk *, fmode_t);
        int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        long (*direct_access)(struct block_device *, sector_t, void __pmem **,
                              unsigned long *pfn);
        unsigned int (*check_events) (struct gendisk *disk,
                                      unsigned int clearing);
        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
};
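
/*
 * Minimal initializer sketch (hypothetical driver; example_open,
 * example_release and example_getgeo are made-up names, and callbacks a
 * driver does not implement are simply left NULL):
 *
 *      static const struct block_device_operations example_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = example_open,
 *              .release        = example_release,
 *              .getgeo         = example_getgeo,
 *      };
 */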

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
                           struct writeback_control *);
extern long bdev_direct_access(struct block_device *, sector_t,
                               void __pmem **addr, unsigned long *pfn,
                               long size);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
        return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                                     sector_t *error_sector)
{
        return 0;
}

#endif /* CONFIG_BLOCK */

#endif