#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16
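
/*
 * Example (editor's sketch, not part of the original header): a
 * REQ_TYPE_BLOCK_PC request carries its SCSI CDB in ->cmd/->cmd_len.
 * Assuming blk_get_request() (declared further down) and
 * TEST_UNIT_READY from <scsi/scsi.h>, a caller might set one up as:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = TEST_UNIT_READY;
 *	rq->cmd_len = 6;
 */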

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
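
/*
 * Example (editor's sketch): ->end_io/->end_io_data form the
 * asynchronous completion hook used by e.g. blk_execute_rq_nowait(),
 * declared further down.  Assuming end_io_data was pointed at a
 * struct completion by the submitter:
 *
 *	static void my_end_io(struct request *rq, int error)
 *	{
 *		complete(rq->end_io_data);
 *	}
 */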

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests.  Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

	int			bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct list_head	all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
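
/*
 * Example (editor's sketch): apart from the *_unlocked variants, the
 * queue_flag_* helpers above assert that ->queue_lock is held, so a
 * typical caller looks like:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 */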

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}
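
/*
 * Worked example (editor's note) for rw_is_sync() above: reads always
 * count as sync, writes only when REQ_SYNC is set:
 *
 *	rw_is_sync(READ)		-> true
 *	rw_is_sync(WRITE)		-> false
 *	rw_is_sync(WRITE | REQ_SYNC)	-> true
 */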

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	return true;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */
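
/*
 * Example (editor's sketch): a prep_rq_fn, installed with
 * blk_queue_prep_rq() (declared further down), reports back through
 * the BLKPREP_* codes above; "my_build_cmd" is a hypothetical helper:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_build_cmd(rq))
 *			return BLKPREP_DEFER;	// out of resources, retry later
 *		return BLKPREP_OK;
 *	}
 */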

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
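
/*
 * Example (editor's sketch): walking every bio_vec of a request with
 * the iterator above; "my_handle_page" is a hypothetical helper:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_handle_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
 */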

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
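
/*
 * Example (editor's sketch): executing a prebuilt passthrough request
 * synchronously; the gendisk may be NULL and the last argument selects
 * insertion at the head of the queue:
 *
 *	int err = blk_execute_rq(q, NULL, rq, 0);
 */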

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return q->limits.max_discard_sectors;

	return q->limits.max_sectors;
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	return blk_queue_get_max_sectors(q, rq->cmd_flags);
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
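
/*
 * Example (editor's sketch): the core of a simple request_fn built on
 * the issue helpers above; "my_xfer" is a hypothetical helper that
 * returns 0 or a negative errno.  The __-prefixed completion is used
 * because a request_fn is invoked with ->queue_lock held:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			__blk_end_request_all(rq, my_xfer(rq));
 *	}
 */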

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
			  struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
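
/*
 * Example (editor's sketch): a driver typically allocates its queue and
 * then applies its hardware limits with the setters above; the request
 * handler, lock and numbers here are made up:
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 255);
 *	blk_queue_max_segments(q, 32);
 */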

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only flush
 * the plug list when the task sleeps by itself.  For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	unsigned long magic;		/* detect uninitialized use-cases */
	struct list_head list;		/* requests */
	struct list_head cb_list;	/* md requires an unplug callback */
	unsigned int should_sort;	/* list to be sorted before flushing? */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
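
/*
 * Example (editor's sketch): a submitter batches bios under a plug so
 * they can be merged and moved to the queue in one go when the plug is
 * finished; submit_bio() here is the one from <linux/fs.h>:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(READ, bio1);
 *	submit_bio(READ, bio2);
 *	blk_finish_plug(&plug);
 */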

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}
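
/*
 * Worked example (editor's note): the shifts above convert filesystem
 * blocks to 512-byte sectors.  With 4KiB blocks, s_blocksize_bits is
 * 12, so block << (12 - 9) multiplies by 8; fs block 100 becomes
 * sector 800.
 */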
1037018e0446SJens Axboe extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
10380b07de85SAdel Gadllah 
1039eb28d31bSMartin K. Petersen enum blk_default_limits {
1040eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENTS	= 128,
1041eb28d31bSMartin K. Petersen 	BLK_SAFE_MAX_SECTORS	= 255,
1042eb28d31bSMartin K. Petersen 	BLK_DEF_MAX_SECTORS	= 1024,
1043eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENT_SIZE	= 65536,
1044eb28d31bSMartin K. Petersen 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1045eb28d31bSMartin K. Petersen };
10460e435ac2SMilan Broz 
10471da177e4SLinus Torvalds #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
10481da177e4SLinus Torvalds 
1049ae03bf63SMartin K. Petersen static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1050ae03bf63SMartin K. Petersen {
1051025146e1SMartin K. Petersen 	return q->limits.bounce_pfn;
1052ae03bf63SMartin K. Petersen }
1053ae03bf63SMartin K. Petersen 
1054ae03bf63SMartin K. Petersen static inline unsigned long queue_segment_boundary(struct request_queue *q)
1055ae03bf63SMartin K. Petersen {
1056025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
1057ae03bf63SMartin K. Petersen }
1058ae03bf63SMartin K. Petersen 
1059ae03bf63SMartin K. Petersen static inline unsigned int queue_max_sectors(struct request_queue *q)
1060ae03bf63SMartin K. Petersen {
1061025146e1SMartin K. Petersen 	return q->limits.max_sectors;
1062ae03bf63SMartin K. Petersen }
1063ae03bf63SMartin K. Petersen 
1064ae03bf63SMartin K. Petersen static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1065ae03bf63SMartin K. Petersen {
1066025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
1067ae03bf63SMartin K. Petersen }
1068ae03bf63SMartin K. Petersen 
10698a78362cSMartin K. Petersen static inline unsigned short queue_max_segments(struct request_queue *q)
1070ae03bf63SMartin K. Petersen {
10718a78362cSMartin K. Petersen 	return q->limits.max_segments;
1072ae03bf63SMartin K. Petersen }
1073ae03bf63SMartin K. Petersen 
1074ae03bf63SMartin K. Petersen static inline unsigned int queue_max_segment_size(struct request_queue *q)
1075ae03bf63SMartin K. Petersen {
1076025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
1077ae03bf63SMartin K. Petersen }
1078ae03bf63SMartin K. Petersen 
1079e1defc4fSMartin K. Petersen static inline unsigned short queue_logical_block_size(struct request_queue *q)
10801da177e4SLinus Torvalds {
10811da177e4SLinus Torvalds 	int retval = 512;
10821da177e4SLinus Torvalds 
1083025146e1SMartin K. Petersen 	if (q && q->limits.logical_block_size)
1084025146e1SMartin K. Petersen 		retval = q->limits.logical_block_size;
10851da177e4SLinus Torvalds 
10861da177e4SLinus Torvalds 	return retval;
10871da177e4SLinus Torvalds }
10881da177e4SLinus Torvalds 
1089e1defc4fSMartin K. Petersen static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
10901da177e4SLinus Torvalds {
1091e1defc4fSMartin K. Petersen 	return queue_logical_block_size(bdev_get_queue(bdev));
10921da177e4SLinus Torvalds }
10931da177e4SLinus Torvalds 
1094c72758f3SMartin K. Petersen static inline unsigned int queue_physical_block_size(struct request_queue *q)
1095c72758f3SMartin K. Petersen {
1096c72758f3SMartin K. Petersen 	return q->limits.physical_block_size;
1097c72758f3SMartin K. Petersen }
1098c72758f3SMartin K. Petersen 
1099892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1100ac481c20SMartin K. Petersen {
1101ac481c20SMartin K. Petersen 	return queue_physical_block_size(bdev_get_queue(bdev));
1102ac481c20SMartin K. Petersen }
1103ac481c20SMartin K. Petersen 
1104c72758f3SMartin K. Petersen static inline unsigned int queue_io_min(struct request_queue *q)
1105c72758f3SMartin K. Petersen {
1106c72758f3SMartin K. Petersen 	return q->limits.io_min;
1107c72758f3SMartin K. Petersen }
1108c72758f3SMartin K. Petersen 
1109ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev)
1110ac481c20SMartin K. Petersen {
1111ac481c20SMartin K. Petersen 	return queue_io_min(bdev_get_queue(bdev));
1112ac481c20SMartin K. Petersen }
1113ac481c20SMartin K. Petersen 
1114c72758f3SMartin K. Petersen static inline unsigned int queue_io_opt(struct request_queue *q)
1115c72758f3SMartin K. Petersen {
1116c72758f3SMartin K. Petersen 	return q->limits.io_opt;
1117c72758f3SMartin K. Petersen }
1118c72758f3SMartin K. Petersen 
1119ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev)
1120ac481c20SMartin K. Petersen {
1121ac481c20SMartin K. Petersen 	return queue_io_opt(bdev_get_queue(bdev));
1122ac481c20SMartin K. Petersen }
1123ac481c20SMartin K. Petersen 
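/*
 * Editorial example (illustrative only; "len" and "bdev" come from the
 * caller): the io_min/io_opt hints exported above let a caller size I/O
 * to the device's preferred granularity, e.g. a RAID array reporting
 * io_opt as its stripe width:
 *
 *	unsigned int opt = bdev_io_opt(bdev);
 *
 *	if (opt)
 *		len = rounddown(len, opt);
 */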
1124c72758f3SMartin K. Petersen static inline int queue_alignment_offset(struct request_queue *q)
1125c72758f3SMartin K. Petersen {
1126ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1127c72758f3SMartin K. Petersen 		return -1;
1128c72758f3SMartin K. Petersen 
1129c72758f3SMartin K. Petersen 	return q->limits.alignment_offset;
1130c72758f3SMartin K. Petersen }
1131c72758f3SMartin K. Petersen 
1132e03a72e1SMartin K. Petersen static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
113381744ee4SMartin K. Petersen {
113381744ee4SMartin K. Petersen 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1135e03a72e1SMartin K. Petersen 	unsigned int alignment = (sector << 9) & (granularity - 1);
113681744ee4SMartin K. Petersen 
1137e03a72e1SMartin K. Petersen 	return (granularity + lim->alignment_offset - alignment)
1138e03a72e1SMartin K. Petersen 		& (granularity - 1);
1139c72758f3SMartin K. Petersen }
1140c72758f3SMartin K. Petersen 
1141ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev)
1142ac481c20SMartin K. Petersen {
1143ac481c20SMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
1144ac481c20SMartin K. Petersen 
1145ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1146ac481c20SMartin K. Petersen 		return -1;
1147ac481c20SMartin K. Petersen 
1148ac481c20SMartin K. Petersen 	if (bdev != bdev->bd_contains)
1149ac481c20SMartin K. Petersen 		return bdev->bd_part->alignment_offset;
1150ac481c20SMartin K. Petersen 
1151ac481c20SMartin K. Petersen 	return q->limits.alignment_offset;
1152ac481c20SMartin K. Petersen }
1153ac481c20SMartin K. Petersen 
115486b37281SMartin K. Petersen static inline int queue_discard_alignment(struct request_queue *q)
115586b37281SMartin K. Petersen {
115686b37281SMartin K. Petersen 	if (q->limits.discard_misaligned)
115786b37281SMartin K. Petersen 		return -1;
115886b37281SMartin K. Petersen 
115986b37281SMartin K. Petersen 	return q->limits.discard_alignment;
116086b37281SMartin K. Petersen }
116186b37281SMartin K. Petersen 
1162e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
116386b37281SMartin K. Petersen {
1164dd3d145dSMartin K. Petersen 	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
1165dd3d145dSMartin K. Petersen 
1166a934a00aSMartin K. Petersen 	if (!lim->max_discard_sectors)
1167a934a00aSMartin K. Petersen 		return 0;
1168a934a00aSMartin K. Petersen 
1169dd3d145dSMartin K. Petersen 	return (lim->discard_granularity + lim->discard_alignment - alignment)
1170dd3d145dSMartin K. Petersen 		& (lim->discard_granularity - 1);
117186b37281SMartin K. Petersen }
117286b37281SMartin K. Petersen 
1173c6e66634SPaolo Bonzini static inline int bdev_discard_alignment(struct block_device *bdev)
1174c6e66634SPaolo Bonzini {
1175c6e66634SPaolo Bonzini 	struct request_queue *q = bdev_get_queue(bdev);
1176c6e66634SPaolo Bonzini 
1177c6e66634SPaolo Bonzini 	if (bdev != bdev->bd_contains)
1178c6e66634SPaolo Bonzini 		return bdev->bd_part->discard_alignment;
1179c6e66634SPaolo Bonzini 
1180c6e66634SPaolo Bonzini 	return q->limits.discard_alignment;
1181c6e66634SPaolo Bonzini }
1182c6e66634SPaolo Bonzini 
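/*
 * Editorial worked example for queue_limit_discard_alignment(), assuming
 * max_discard_sectors is non-zero: with discard_granularity = 65536,
 * discard_alignment = 0 and sector = 520, the byte offset within the
 * granule is (520 << 9) & 65535 = 4096, so the function returns
 * (65536 + 0 - 4096) & 65535 = 61440, the distance in bytes to the next
 * naturally aligned discard boundary.
 */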
118398262f27SMartin K. Petersen static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
118498262f27SMartin K. Petersen {
1185a934a00aSMartin K. Petersen 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
118698262f27SMartin K. Petersen 		return 1;
118798262f27SMartin K. Petersen 
118898262f27SMartin K. Petersen 	return 0;
118998262f27SMartin K. Petersen }
119098262f27SMartin K. Petersen 
119198262f27SMartin K. Petersen static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
119298262f27SMartin K. Petersen {
119398262f27SMartin K. Petersen 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
119498262f27SMartin K. Petersen }
119598262f27SMartin K. Petersen 
1196165125e1SJens Axboe static inline int queue_dma_alignment(struct request_queue *q)
11971da177e4SLinus Torvalds {
1198482eb689SPete Wyckoff 	return q ? q->dma_alignment : 511;
11991da177e4SLinus Torvalds }
12001da177e4SLinus Torvalds 
120114417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
120287904074SFUJITA Tomonori 				 unsigned int len)
120387904074SFUJITA Tomonori {
120487904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
120514417799SNamhyung Kim 	return !(addr & alignment) && !(len & alignment);
120687904074SFUJITA Tomonori }
120787904074SFUJITA Tomonori 
12081da177e4SLinus Torvalds /* assumes size > 256 */
12091da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
12101da177e4SLinus Torvalds {
12111da177e4SLinus Torvalds 	unsigned int bits = 8;
12121da177e4SLinus Torvalds 	do {
12131da177e4SLinus Torvalds 		bits++;
12141da177e4SLinus Torvalds 		size >>= 1;
12151da177e4SLinus Torvalds 	} while (size > 256);
12161da177e4SLinus Torvalds 	return bits;
12171da177e4SLinus Torvalds }
12181da177e4SLinus Torvalds 
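/*
 * Editorial worked example for blksize_bits(): the loop above computes
 * log2 of the block size by repeated right shifts. For size = 4096 it
 * shifts 4096 -> 2048 -> 1024 -> 512 -> 256, bumping bits from 8 to 12,
 * so blksize_bits(4096) == 12 and 1 << 12 == 4096. The "assumes
 * size > 256" comment matters: blksize_bits(256) would also return 9,
 * not the correct 8.
 */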
12192befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev)
12201da177e4SLinus Torvalds {
12211da177e4SLinus Torvalds 	return bdev->bd_block_size;
12221da177e4SLinus Torvalds }
12231da177e4SLinus Torvalds 
1224f3876930S[email protected] static inline bool queue_flush_queueable(struct request_queue *q)
1225f3876930S[email protected] {
1226f3876930S[email protected] 	return !q->flush_not_queueable;
1227f3876930S[email protected] }
1228f3876930S[email protected] 
12291da177e4SLinus Torvalds typedef struct {struct page *v;} Sector;
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
12321da177e4SLinus Torvalds 
12331da177e4SLinus Torvalds static inline void put_dev_sector(Sector p)
12341da177e4SLinus Torvalds {
12351da177e4SLinus Torvalds 	page_cache_release(p.v);
12361da177e4SLinus Torvalds }
12371da177e4SLinus Torvalds 
12381da177e4SLinus Torvalds struct work_struct;
123918887ad9SJens Axboe int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
12401da177e4SLinus Torvalds 
12419195291eSDivyesh Shah #ifdef CONFIG_BLK_CGROUP
124228f4197eSJens Axboe /*
124328f4197eSJens Axboe  * This should not be using sched_clock(). A real patch is in progress
124428f4197eSJens Axboe  * to fix this up; until that is in place we need to disable preemption
124528f4197eSJens Axboe  * around sched_clock() in this function and set_io_start_time_ns().
124628f4197eSJens Axboe  */
12479195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req)
12489195291eSDivyesh Shah {
124928f4197eSJens Axboe 	preempt_disable();
12509195291eSDivyesh Shah 	req->start_time_ns = sched_clock();
125128f4197eSJens Axboe 	preempt_enable();
12529195291eSDivyesh Shah }
12539195291eSDivyesh Shah 
12549195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req)
12559195291eSDivyesh Shah {
125628f4197eSJens Axboe 	preempt_disable();
12579195291eSDivyesh Shah 	req->io_start_time_ns = sched_clock();
125828f4197eSJens Axboe 	preempt_enable();
12599195291eSDivyesh Shah }
126084c124daSDivyesh Shah 
126184c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
126284c124daSDivyesh Shah {
126384c124daSDivyesh Shah 	return req->start_time_ns;
126484c124daSDivyesh Shah }
126584c124daSDivyesh Shah 
126684c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
126784c124daSDivyesh Shah {
126884c124daSDivyesh Shah 	return req->io_start_time_ns;
126984c124daSDivyesh Shah }
12709195291eSDivyesh Shah #else
12719195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req) {}
12729195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req) {}
127384c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
127484c124daSDivyesh Shah {
127584c124daSDivyesh Shah 	return 0;
127684c124daSDivyesh Shah }
127784c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
127884c124daSDivyesh Shah {
127984c124daSDivyesh Shah 	return 0;
128084c124daSDivyesh Shah }
12819195291eSDivyesh Shah #endif
12829195291eSDivyesh Shah 
12831da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
12841da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
12851da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
12861da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
12871da177e4SLinus Torvalds 
12887ba1ba12SMartin K. Petersen #if defined(CONFIG_BLK_DEV_INTEGRITY)
12897ba1ba12SMartin K. Petersen 
1290b24498d4SJens Axboe #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
1291b24498d4SJens Axboe #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
12927ba1ba12SMartin K. Petersen 
12937ba1ba12SMartin K. Petersen struct blk_integrity_exchg {
12947ba1ba12SMartin K. Petersen 	void			*prot_buf;
12957ba1ba12SMartin K. Petersen 	void			*data_buf;
12967ba1ba12SMartin K. Petersen 	sector_t		sector;
12977ba1ba12SMartin K. Petersen 	unsigned int		data_size;
12987ba1ba12SMartin K. Petersen 	unsigned short		sector_size;
12997ba1ba12SMartin K. Petersen 	const char		*disk_name;
13007ba1ba12SMartin K. Petersen };
13017ba1ba12SMartin K. Petersen 
13027ba1ba12SMartin K. Petersen typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
13037ba1ba12SMartin K. Petersen typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
13047ba1ba12SMartin K. Petersen typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
13057ba1ba12SMartin K. Petersen typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
13067ba1ba12SMartin K. Petersen 
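/*
 * Editorial example (illustrative sketch, not a real integrity profile):
 * a generate_fn is called with a blk_integrity_exchg describing one or
 * more sectors of data and fills prot_buf with one tuple per sector:
 *
 *	static void example_generate_fn(struct blk_integrity_exchg *bix)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < bix->data_size; i += bix->sector_size) {
 *			... compute a checksum of bix->data_buf + i and
 *			    store the next tuple in bix->prot_buf ...
 *		}
 *	}
 */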
13077ba1ba12SMartin K. Petersen struct blk_integrity {
13087ba1ba12SMartin K. Petersen 	integrity_gen_fn	*generate_fn;
13097ba1ba12SMartin K. Petersen 	integrity_vrfy_fn	*verify_fn;
13107ba1ba12SMartin K. Petersen 	integrity_set_tag_fn	*set_tag_fn;
13117ba1ba12SMartin K. Petersen 	integrity_get_tag_fn	*get_tag_fn;
13127ba1ba12SMartin K. Petersen 
13137ba1ba12SMartin K. Petersen 	unsigned short		flags;
13147ba1ba12SMartin K. Petersen 	unsigned short		tuple_size;
13157ba1ba12SMartin K. Petersen 	unsigned short		sector_size;
13167ba1ba12SMartin K. Petersen 	unsigned short		tag_size;
13177ba1ba12SMartin K. Petersen 
13187ba1ba12SMartin K. Petersen 	const char		*name;
13197ba1ba12SMartin K. Petersen 
13207ba1ba12SMartin K. Petersen 	struct kobject		kobj;
13217ba1ba12SMartin K. Petersen };
13227ba1ba12SMartin K. Petersen 
1323a63a5cf8SMike Snitzer extern bool blk_integrity_is_initialized(struct gendisk *);
13247ba1ba12SMartin K. Petersen extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
13257ba1ba12SMartin K. Petersen extern void blk_integrity_unregister(struct gendisk *);
1326ad7fce93SMartin K. Petersen extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
132713f05c8dSMartin K. Petersen extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
132813f05c8dSMartin K. Petersen 				   struct scatterlist *);
132913f05c8dSMartin K. Petersen extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
133013f05c8dSMartin K. Petersen extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
133113f05c8dSMartin K. Petersen 				  struct request *);
133213f05c8dSMartin K. Petersen extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
133313f05c8dSMartin K. Petersen 				   struct bio *);
13347ba1ba12SMartin K. Petersen 
1335b04accc4SJens Axboe static inline
1336b04accc4SJens Axboe struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1337b04accc4SJens Axboe {
1338b04accc4SJens Axboe 	return bdev->bd_disk->integrity;
1339b04accc4SJens Axboe }
1340b04accc4SJens Axboe 
1341b02739b0SMartin K. Petersen static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1342b02739b0SMartin K. Petersen {
1343b02739b0SMartin K. Petersen 	return disk->integrity;
1344b02739b0SMartin K. Petersen }
1345b02739b0SMartin K. Petersen 
13467ba1ba12SMartin K. Petersen static inline int blk_integrity_rq(struct request *rq)
13477ba1ba12SMartin K. Petersen {
1348d442cc44SMartin K. Petersen 	if (rq->bio == NULL)
1349d442cc44SMartin K. Petersen 		return 0;
1350d442cc44SMartin K. Petersen 
13517ba1ba12SMartin K. Petersen 	return bio_integrity(rq->bio);
13527ba1ba12SMartin K. Petersen }
13537ba1ba12SMartin K. Petersen 
135413f05c8dSMartin K. Petersen static inline void blk_queue_max_integrity_segments(struct request_queue *q,
135513f05c8dSMartin K. Petersen 						    unsigned int segs)
135613f05c8dSMartin K. Petersen {
135713f05c8dSMartin K. Petersen 	q->limits.max_integrity_segments = segs;
135813f05c8dSMartin K. Petersen }
135913f05c8dSMartin K. Petersen 
136013f05c8dSMartin K. Petersen static inline unsigned short
136113f05c8dSMartin K. Petersen queue_max_integrity_segments(struct request_queue *q)
136213f05c8dSMartin K. Petersen {
136313f05c8dSMartin K. Petersen 	return q->limits.max_integrity_segments;
136413f05c8dSMartin K. Petersen }
136513f05c8dSMartin K. Petersen 
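/*
 * Editorial example (field values and helper names are made up): a driver
 * exposing an integrity profile fills in a struct blk_integrity and
 * registers it against its gendisk; blk_integrity_unregister() undoes
 * this on teardown.
 *
 *	static struct blk_integrity example_integrity = {
 *		.name		= "EXAMPLE-CRC",
 *		.generate_fn	= example_generate_fn,
 *		.verify_fn	= example_verify_fn,
 *		.tuple_size	= 8,
 *		.sector_size	= 512,
 *	};
 *
 *	blk_integrity_register(disk, &example_integrity);
 */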
13667ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */
13677ba1ba12SMartin K. Petersen 
1368fd83240aSStephen Rothwell struct bio;
1369fd83240aSStephen Rothwell struct block_device;
1370fd83240aSStephen Rothwell struct gendisk;
1371fd83240aSStephen Rothwell struct blk_integrity;
1372fd83240aSStephen Rothwell 
1373fd83240aSStephen Rothwell static inline int blk_integrity_rq(struct request *rq)
1374fd83240aSStephen Rothwell {
1375fd83240aSStephen Rothwell 	return 0;
1376fd83240aSStephen Rothwell }
1377fd83240aSStephen Rothwell static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1378fd83240aSStephen Rothwell 					    struct bio *b)
1379fd83240aSStephen Rothwell {
1380fd83240aSStephen Rothwell 	return 0;
1381fd83240aSStephen Rothwell }
1382fd83240aSStephen Rothwell static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1383fd83240aSStephen Rothwell 					  struct bio *b,
1384fd83240aSStephen Rothwell 					  struct scatterlist *s)
1385fd83240aSStephen Rothwell {
1386fd83240aSStephen Rothwell 	return 0;
1387fd83240aSStephen Rothwell }
1388fd83240aSStephen Rothwell static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1389fd83240aSStephen Rothwell {
1390fd83240aSStephen Rothwell 	return NULL;
1391fd83240aSStephen Rothwell }
1392fd83240aSStephen Rothwell static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1393fd83240aSStephen Rothwell {
1394fd83240aSStephen Rothwell 	return NULL;
1395fd83240aSStephen Rothwell }
1396fd83240aSStephen Rothwell static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1397fd83240aSStephen Rothwell {
1398fd83240aSStephen Rothwell 	return 0;
1399fd83240aSStephen Rothwell }
1400fd83240aSStephen Rothwell static inline int blk_integrity_register(struct gendisk *d,
1401fd83240aSStephen Rothwell 					 struct blk_integrity *b)
1402fd83240aSStephen Rothwell {
1403fd83240aSStephen Rothwell 	return 0;
1404fd83240aSStephen Rothwell }
1405fd83240aSStephen Rothwell static inline void blk_integrity_unregister(struct gendisk *d)
1406fd83240aSStephen Rothwell {
1407fd83240aSStephen Rothwell }
1408fd83240aSStephen Rothwell static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1409fd83240aSStephen Rothwell 						    unsigned int segs)
1410fd83240aSStephen Rothwell {
1411fd83240aSStephen Rothwell }
1412fd83240aSStephen Rothwell static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1413fd83240aSStephen Rothwell {
1414fd83240aSStephen Rothwell 	return 0;
1415fd83240aSStephen Rothwell }
1416fd83240aSStephen Rothwell static inline int blk_integrity_merge_rq(struct request_queue *rq,
1417fd83240aSStephen Rothwell 					 struct request *r1,
1418fd83240aSStephen Rothwell 					 struct request *r2)
1419fd83240aSStephen Rothwell {
1420fd83240aSStephen Rothwell 	return 0;
1421fd83240aSStephen Rothwell }
1422fd83240aSStephen Rothwell static inline int blk_integrity_merge_bio(struct request_queue *rq,
1423fd83240aSStephen Rothwell 					  struct request *r,
1424fd83240aSStephen Rothwell 					  struct bio *b)
1425fd83240aSStephen Rothwell {
1426fd83240aSStephen Rothwell 	return 0;
1427fd83240aSStephen Rothwell }
1428fd83240aSStephen Rothwell static inline bool blk_integrity_is_initialized(struct gendisk *g)
1429fd83240aSStephen Rothwell {
1430fd83240aSStephen Rothwell 	return false;
1431fd83240aSStephen Rothwell }
14327ba1ba12SMartin K. Petersen 
14337ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */
14347ba1ba12SMartin K. Petersen 
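/*
 * Editorial note: the !CONFIG_BLK_DEV_INTEGRITY stubs above mirror the
 * real declarations so that callers need no #ifdefs of their own. An
 * (illustrative) caller compiles either way; with integrity configured
 * out the whole branch folds away at compile time:
 *
 *	if (blk_integrity_rq(rq))
 *		nr_prot_segs = blk_rq_count_integrity_sg(q, rq->bio);
 */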
143508f85851SAl Viro struct block_device_operations {
1436d4430d62SAl Viro 	int (*open) (struct block_device *, fmode_t);
1437d4430d62SAl Viro 	int (*release) (struct gendisk *, fmode_t);
1438d4430d62SAl Viro 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1439d4430d62SAl Viro 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
144008f85851SAl Viro 	int (*direct_access) (struct block_device *, sector_t,
144108f85851SAl Viro 			      void **, unsigned long *);
144277ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
144377ea887eSTejun Heo 				      unsigned int clearing);
144477ea887eSTejun Heo 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
144508f85851SAl Viro 	int (*media_changed) (struct gendisk *);
1446c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
144708f85851SAl Viro 	int (*revalidate_disk) (struct gendisk *);
144808f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1449b3a27d05SNitin Gupta 	/* this callback is with swap_lock and sometimes page table lock held */
1450b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
145108f85851SAl Viro 	struct module *owner;
145208f85851SAl Viro };
145308f85851SAl Viro 
1454633a08b8SAl Viro extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1455633a08b8SAl Viro 				 unsigned long);
14569361401eSDavid Howells #else /* CONFIG_BLOCK */
14579361401eSDavid Howells /*
14589361401eSDavid Howells  * stubs for when the block layer is configured out
14599361401eSDavid Howells  */
14609361401eSDavid Howells #define buffer_heads_over_limit 0
14619361401eSDavid Howells 
14629361401eSDavid Howells static inline long nr_blockdev_pages(void)
14639361401eSDavid Howells {
14649361401eSDavid Howells 	return 0;
14659361401eSDavid Howells }
14669361401eSDavid Howells 
14671f940bdfSJens Axboe struct blk_plug {
14681f940bdfSJens Axboe };
14691f940bdfSJens Axboe 
14701f940bdfSJens Axboe static inline void blk_start_plug(struct blk_plug *plug)
147173c10101SJens Axboe {
147273c10101SJens Axboe }
147373c10101SJens Axboe 
14741f940bdfSJens Axboe static inline void blk_finish_plug(struct blk_plug *plug)
147573c10101SJens Axboe {
147673c10101SJens Axboe }
147773c10101SJens Axboe 
14781f940bdfSJens Axboe static inline void blk_flush_plug(struct task_struct *task)
147973c10101SJens Axboe {
148073c10101SJens Axboe }
148173c10101SJens Axboe 
1482a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *task)
1483a237c1c5SJens Axboe {
1484a237c1c5SJens Axboe }
1485a237c1c5SJens Axboe 
1486a237c1c5SJens Axboe 
148773c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk)
148873c10101SJens Axboe {
148973c10101SJens Axboe 	return false;
149073c10101SJens Axboe }
149173c10101SJens Axboe 
14929361401eSDavid Howells #endif /* CONFIG_BLOCK */
14939361401eSDavid Howells 
14941da177e4SLinus Torvalds #endif
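/*
 * Editorial example (callback names are made up): a minimal block driver
 * fills in only the block_device_operations it supports and leaves the
 * rest NULL:
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= example_open,
 *		.release	= example_release,
 *		.getgeo		= example_getgeo,
 *	};
 */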