/* xref: /linux-6.15/include/linux/blkdev.h (revision 72d4cd9f) */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more, they have to allocate them dynamically.
	 */
	void *elevator_private;
	void *elevator_private2;
	void *elevator_private3;

	struct gendisk *rq_disk;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	signed char		discard_zeroes_data;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_seq;
	int			flush_err;
	struct request		flush_rq;
	struct request		*orig_flush_rq;
	struct list_head	pending_flushes;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
#define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD  19	/* supports SECDISCARD */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq)	\
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}


/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor may
 * it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

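/*
 * Illustrative sketch (not part of this header): walking the data segments of
 * a request with the iterator above.  The function name is made up, and the
 * pages are assumed to be lowmem so that page_address() is usable; a real
 * driver would map highmem pages instead.
 *
 *	static void example_copy_request(struct request *rq, char *dst)
 *	{
 *		struct req_iterator iter;
 *		struct bio_vec *bvec;
 *
 *		rq_for_each_segment(bvec, rq, iter) {
 *			char *src = page_address(bvec->bv_page) + bvec->bv_offset;
 *
 *			memcpy(dst, src, bvec->bv_len);
 *			dst += bvec->bv_len;
 *		}
 *	}
 */
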
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

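/*
 * Illustrative sketch (not part of this header): issuing a synchronous
 * REQ_TYPE_BLOCK_PC pass-through command with the helpers declared above.
 * "opcode", "buffer", "buf_len" and "disk" are placeholders supplied by the
 * caller; error handling is kept minimal.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	int err = -ENOMEM;
 *
 *	if (rq) {
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		rq->cmd[0] = opcode;
 *		rq->cmd_len = 6;
 *		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *		err = blk_rq_map_kern(q, rq, buffer, buf_len, GFP_KERNEL);
 *		if (!err)
 *			err = blk_execute_rq(q, disk, rq, 0);
 *		blk_put_request(rq);
 *	}
 */
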
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
							   request_fn_proc *,
							   spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

883dd3932edSChristoph Hellwig #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
884dd3932edSChristoph Hellwig 
885dd3932edSChristoph Hellwig extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
886fbd9b09aSDmitry Monakhov extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
887fbd9b09aSDmitry Monakhov 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
8883f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
889dd3932edSChristoph Hellwig 			sector_t nr_sects, gfp_t gfp_mask);
8902cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block,
8912cf6d26aSChristoph Hellwig 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
892fb2dce86SDavid Woodhouse {
8932cf6d26aSChristoph Hellwig 	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
8942cf6d26aSChristoph Hellwig 				    nr_blocks << (sb->s_blocksize_bits - 9),
8952cf6d26aSChristoph Hellwig 				    gfp_mask, flags);
896fb2dce86SDavid Woodhouse }
897e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
898a107e5a3STheodore Ts'o 		sector_t nr_blocks, gfp_t gfp_mask)
899e6fa0be6SLukas Czerner {
900e6fa0be6SLukas Czerner 	return blkdev_issue_zeroout(sb->s_bdev,
901e6fa0be6SLukas Czerner 				    block << (sb->s_blocksize_bits - 9),
902e6fa0be6SLukas Czerner 				    nr_blocks << (sb->s_blocksize_bits - 9),
903a107e5a3STheodore Ts'o 				    gfp_mask);
904e6fa0be6SLukas Czerner }
9051da177e4SLinus Torvalds 
906018e0446SJens Axboe extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
9070b07de85SAdel Gadllah 
908eb28d31bSMartin K. Petersen enum blk_default_limits {
909eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENTS	= 128,
910eb28d31bSMartin K. Petersen 	BLK_SAFE_MAX_SECTORS	= 255,
911eb28d31bSMartin K. Petersen 	BLK_DEF_MAX_SECTORS	= 1024,
912eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENT_SIZE	= 65536,
913eb28d31bSMartin K. Petersen 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
914eb28d31bSMartin K. Petersen };
9150e435ac2SMilan Broz 
9161da177e4SLinus Torvalds #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
9171da177e4SLinus Torvalds 
918ae03bf63SMartin K. Petersen static inline unsigned long queue_bounce_pfn(struct request_queue *q)
919ae03bf63SMartin K. Petersen {
920025146e1SMartin K. Petersen 	return q->limits.bounce_pfn;
921ae03bf63SMartin K. Petersen }
922ae03bf63SMartin K. Petersen 
923ae03bf63SMartin K. Petersen static inline unsigned long queue_segment_boundary(struct request_queue *q)
924ae03bf63SMartin K. Petersen {
925025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
926ae03bf63SMartin K. Petersen }
927ae03bf63SMartin K. Petersen 
928ae03bf63SMartin K. Petersen static inline unsigned int queue_max_sectors(struct request_queue *q)
929ae03bf63SMartin K. Petersen {
930025146e1SMartin K. Petersen 	return q->limits.max_sectors;
931ae03bf63SMartin K. Petersen }
932ae03bf63SMartin K. Petersen 
933ae03bf63SMartin K. Petersen static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
934ae03bf63SMartin K. Petersen {
935025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
936ae03bf63SMartin K. Petersen }
937ae03bf63SMartin K. Petersen 
9388a78362cSMartin K. Petersen static inline unsigned short queue_max_segments(struct request_queue *q)
939ae03bf63SMartin K. Petersen {
9408a78362cSMartin K. Petersen 	return q->limits.max_segments;
941ae03bf63SMartin K. Petersen }
942ae03bf63SMartin K. Petersen 
943ae03bf63SMartin K. Petersen static inline unsigned int queue_max_segment_size(struct request_queue *q)
944ae03bf63SMartin K. Petersen {
945025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
946ae03bf63SMartin K. Petersen }
947ae03bf63SMartin K. Petersen 
948e1defc4fSMartin K. Petersen static inline unsigned short queue_logical_block_size(struct request_queue *q)
9491da177e4SLinus Torvalds {
9501da177e4SLinus Torvalds 	int retval = 512;
9511da177e4SLinus Torvalds 
952025146e1SMartin K. Petersen 	if (q && q->limits.logical_block_size)
953025146e1SMartin K. Petersen 		retval = q->limits.logical_block_size;
9541da177e4SLinus Torvalds 
9551da177e4SLinus Torvalds 	return retval;
9561da177e4SLinus Torvalds }
9571da177e4SLinus Torvalds 
958e1defc4fSMartin K. Petersen static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
9591da177e4SLinus Torvalds {
960e1defc4fSMartin K. Petersen 	return queue_logical_block_size(bdev_get_queue(bdev));
9611da177e4SLinus Torvalds }
9621da177e4SLinus Torvalds 
963c72758f3SMartin K. Petersen static inline unsigned int queue_physical_block_size(struct request_queue *q)
964c72758f3SMartin K. Petersen {
965c72758f3SMartin K. Petersen 	return q->limits.physical_block_size;
966c72758f3SMartin K. Petersen }
967c72758f3SMartin K. Petersen 
968892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
969ac481c20SMartin K. Petersen {
970ac481c20SMartin K. Petersen 	return queue_physical_block_size(bdev_get_queue(bdev));
971ac481c20SMartin K. Petersen }
972ac481c20SMartin K. Petersen 
973c72758f3SMartin K. Petersen static inline unsigned int queue_io_min(struct request_queue *q)
974c72758f3SMartin K. Petersen {
975c72758f3SMartin K. Petersen 	return q->limits.io_min;
976c72758f3SMartin K. Petersen }
977c72758f3SMartin K. Petersen 
978ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev)
979ac481c20SMartin K. Petersen {
980ac481c20SMartin K. Petersen 	return queue_io_min(bdev_get_queue(bdev));
981ac481c20SMartin K. Petersen }
982ac481c20SMartin K. Petersen 
983c72758f3SMartin K. Petersen static inline unsigned int queue_io_opt(struct request_queue *q)
984c72758f3SMartin K. Petersen {
985c72758f3SMartin K. Petersen 	return q->limits.io_opt;
986c72758f3SMartin K. Petersen }
987c72758f3SMartin K. Petersen 
988ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev)
989ac481c20SMartin K. Petersen {
990ac481c20SMartin K. Petersen 	return queue_io_opt(bdev_get_queue(bdev));
991ac481c20SMartin K. Petersen }
992ac481c20SMartin K. Petersen 
993c72758f3SMartin K. Petersen static inline int queue_alignment_offset(struct request_queue *q)
994c72758f3SMartin K. Petersen {
995ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
996c72758f3SMartin K. Petersen 		return -1;
997c72758f3SMartin K. Petersen 
998c72758f3SMartin K. Petersen 	return q->limits.alignment_offset;
999c72758f3SMartin K. Petersen }
1000c72758f3SMartin K. Petersen 
1001e03a72e1SMartin K. Petersen static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
100281744ee4SMartin K. Petersen {
100381744ee4SMartin K. Petersen 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1004e03a72e1SMartin K. Petersen 	unsigned int alignment = (sector << 9) & (granularity - 1);
100581744ee4SMartin K. Petersen 
1006e03a72e1SMartin K. Petersen 	return (granularity + lim->alignment_offset - alignment)
1007e03a72e1SMartin K. Petersen 		& (granularity - 1);
1008c72758f3SMartin K. Petersen }
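
/*
 * Worked example (illustrative values, not taken from any real device):
 * with physical_block_size = 4096, io_min = 4096 and alignment_offset = 0,
 * an I/O starting at sector 7 (byte offset 3584) gives
 *
 *	granularity = max(4096, 4096)          = 4096
 *	alignment   = 3584 & (4096 - 1)        = 3584
 *	offset      = (4096 + 0 - 3584) & 4095 =  512
 *
 * i.e. the start of the I/O sits 512 bytes short of the next naturally
 * aligned boundary.
 */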
1009c72758f3SMartin K. Petersen 
1010ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev)
1011ac481c20SMartin K. Petersen {
1012ac481c20SMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
1013ac481c20SMartin K. Petersen 
1014ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1015ac481c20SMartin K. Petersen 		return -1;
1016ac481c20SMartin K. Petersen 
1017ac481c20SMartin K. Petersen 	if (bdev != bdev->bd_contains)
1018ac481c20SMartin K. Petersen 		return bdev->bd_part->alignment_offset;
1019ac481c20SMartin K. Petersen 
1020ac481c20SMartin K. Petersen 	return q->limits.alignment_offset;
1021ac481c20SMartin K. Petersen }
1022ac481c20SMartin K. Petersen 
102386b37281SMartin K. Petersen static inline int queue_discard_alignment(struct request_queue *q)
102486b37281SMartin K. Petersen {
102586b37281SMartin K. Petersen 	if (q->limits.discard_misaligned)
102686b37281SMartin K. Petersen 		return -1;
102786b37281SMartin K. Petersen 
102886b37281SMartin K. Petersen 	return q->limits.discard_alignment;
102986b37281SMartin K. Petersen }
103086b37281SMartin K. Petersen 
1031e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
103286b37281SMartin K. Petersen {
1033dd3d145dSMartin K. Petersen 	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
1034dd3d145dSMartin K. Petersen 
1035dd3d145dSMartin K. Petersen 	return (lim->discard_granularity + lim->discard_alignment - alignment)
1036dd3d145dSMartin K. Petersen 		& (lim->discard_granularity - 1);
103786b37281SMartin K. Petersen }
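
/*
 * Worked example (illustrative values only): with discard_granularity = 4096
 * and discard_alignment = 0, a range starting at sector 3 (byte offset 1536)
 * gives
 *
 *	alignment = 1536 & (4096 - 1)        = 1536
 *	offset    = (4096 + 0 - 1536) & 4095 = 2560
 *
 * so the start of the range is 2560 bytes (5 sectors) short of the next
 * discard-aligned boundary.
 */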
103886b37281SMartin K. Petersen 
103998262f27SMartin K. Petersen static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
104098262f27SMartin K. Petersen {
104198262f27SMartin K. Petersen 	if (q->limits.discard_zeroes_data == 1)
104298262f27SMartin K. Petersen 		return 1;
104398262f27SMartin K. Petersen 
104498262f27SMartin K. Petersen 	return 0;
104598262f27SMartin K. Petersen }
104698262f27SMartin K. Petersen 
104798262f27SMartin K. Petersen static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
104898262f27SMartin K. Petersen {
104998262f27SMartin K. Petersen 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
105098262f27SMartin K. Petersen }
105198262f27SMartin K. Petersen 
1052165125e1SJens Axboe static inline int queue_dma_alignment(struct request_queue *q)
10531da177e4SLinus Torvalds {
1054482eb689SPete Wyckoff 	return q ? q->dma_alignment : 511;
10551da177e4SLinus Torvalds }
10561da177e4SLinus Torvalds 
105714417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
105887904074SFUJITA Tomonori 				 unsigned int len)
105987904074SFUJITA Tomonori {
106087904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
106114417799SNamhyung Kim 	return !(addr & alignment) && !(len & alignment);
106287904074SFUJITA Tomonori }
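
/*
 * Example (assuming the default dma_alignment of 511 and a zero
 * dma_pad_mask): the combined mask is 511, so blk_rq_aligned() accepts a
 * buffer only when both its address and its length are multiples of 512,
 * e.g. addr = 0x1000, len = 4096 passes, while addr = 0x1004 or len = 520
 * fails.
 */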
106387904074SFUJITA Tomonori 
10641da177e4SLinus Torvalds /* assumes size > 256 */
10651da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
10661da177e4SLinus Torvalds {
10671da177e4SLinus Torvalds 	unsigned int bits = 8;
10681da177e4SLinus Torvalds 	do {
10691da177e4SLinus Torvalds 		bits++;
10701da177e4SLinus Torvalds 		size >>= 1;
10711da177e4SLinus Torvalds 	} while (size > 256);
10721da177e4SLinus Torvalds 	return bits;
10731da177e4SLinus Torvalds }
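
/*
 * For example, blksize_bits(512) returns 9 and blksize_bits(4096) returns
 * 12, i.e. log2 of the block size for the power-of-two sizes the block
 * layer actually uses (hence the "assumes size > 256" note above).
 */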
10741da177e4SLinus Torvalds 
10752befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev)
10761da177e4SLinus Torvalds {
10771da177e4SLinus Torvalds 	return bdev->bd_block_size;
10781da177e4SLinus Torvalds }
10791da177e4SLinus Torvalds 
10801da177e4SLinus Torvalds typedef struct {struct page *v;} Sector;
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
10831da177e4SLinus Torvalds 
10841da177e4SLinus Torvalds static inline void put_dev_sector(Sector p)
10851da177e4SLinus Torvalds {
10861da177e4SLinus Torvalds 	page_cache_release(p.v);
10871da177e4SLinus Torvalds }
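
/*
 * Typical usage, sketched here for illustration only (error handling
 * elided, bdev and n stand for the caller's block device and sector):
 *
 *	Sector sect;
 *	unsigned char *data;
 *
 *	data = read_dev_sector(bdev, n, &sect);
 *	if (data) {
 *		... examine the sector contents at data ...
 *		put_dev_sector(sect);	(releases the page reference)
 *	}
 */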
10881da177e4SLinus Torvalds 
10891da177e4SLinus Torvalds struct work_struct;
109018887ad9SJens Axboe int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1091e43473b7SVivek Goyal int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
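
/*
 * Illustrative sketch only (my_work and my_work_fn are hypothetical names):
 * code that needs to defer work to the block layer's kblockd workqueue
 * typically does something like
 *
 *	DECLARE_WORK(my_work, my_work_fn);
 *	...
 *	kblockd_schedule_work(q, &my_work);
 */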
10921da177e4SLinus Torvalds 
10939195291eSDivyesh Shah #ifdef CONFIG_BLK_CGROUP
109428f4197eSJens Axboe /*
109528f4197eSJens Axboe  * This should not be using sched_clock(). A real patch is in progress
109628f4197eSJens Axboe  * to fix this up; until that is in place we need to disable preemption
109728f4197eSJens Axboe  * around sched_clock() in this function and in set_io_start_time_ns().
109828f4197eSJens Axboe  */
10999195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req)
11009195291eSDivyesh Shah {
110128f4197eSJens Axboe 	preempt_disable();
11029195291eSDivyesh Shah 	req->start_time_ns = sched_clock();
110328f4197eSJens Axboe 	preempt_enable();
11049195291eSDivyesh Shah }
11059195291eSDivyesh Shah 
11069195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req)
11079195291eSDivyesh Shah {
110828f4197eSJens Axboe 	preempt_disable();
11099195291eSDivyesh Shah 	req->io_start_time_ns = sched_clock();
111028f4197eSJens Axboe 	preempt_enable();
11119195291eSDivyesh Shah }
111284c124daSDivyesh Shah 
111384c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
111484c124daSDivyesh Shah {
111584c124daSDivyesh Shah 	return req->start_time_ns;
111684c124daSDivyesh Shah }
111784c124daSDivyesh Shah 
111884c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
111984c124daSDivyesh Shah {
112084c124daSDivyesh Shah 	return req->io_start_time_ns;
112184c124daSDivyesh Shah }
11229195291eSDivyesh Shah #else
11239195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req) {}
11249195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req) {}
112584c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
112684c124daSDivyesh Shah {
112784c124daSDivyesh Shah 	return 0;
112884c124daSDivyesh Shah }
112984c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
113084c124daSDivyesh Shah {
113184c124daSDivyesh Shah 	return 0;
113284c124daSDivyesh Shah }
11339195291eSDivyesh Shah #endif
11349195291eSDivyesh Shah 
1135e43473b7SVivek Goyal #ifdef CONFIG_BLK_DEV_THROTTLING
1136e43473b7SVivek Goyal extern int blk_throtl_init(struct request_queue *q);
1137e43473b7SVivek Goyal extern void blk_throtl_exit(struct request_queue *q);
1138e43473b7SVivek Goyal extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
1139e43473b7SVivek Goyal extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
1140e43473b7SVivek Goyal extern void throtl_shutdown_timer_wq(struct request_queue *q);
1141e43473b7SVivek Goyal #else /* CONFIG_BLK_DEV_THROTTLING */
1142e43473b7SVivek Goyal static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
1143e43473b7SVivek Goyal {
1144e43473b7SVivek Goyal 	return 0;
1145e43473b7SVivek Goyal }
1146e43473b7SVivek Goyal 
1147e43473b7SVivek Goyal static inline int blk_throtl_init(struct request_queue *q) { return 0; }
1148e43473b7SVivek Goyal static inline void blk_throtl_exit(struct request_queue *q) { }
1149e43473b7SVivek Goyal static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
1150e43473b7SVivek Goyal static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
1151e43473b7SVivek Goyal #endif /* CONFIG_BLK_DEV_THROTTLING */
1152e43473b7SVivek Goyal 
11531da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
11541da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
11551da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
11561da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
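
/*
 * For example, MODULE_ALIAS_BLOCKDEV_MAJOR(8) expands to
 * MODULE_ALIAS("block-major-8-*"), so opening a device node with major
 * number 8 can trigger on-demand loading of the matching module.
 */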
11571da177e4SLinus Torvalds 
11587ba1ba12SMartin K. Petersen #if defined(CONFIG_BLK_DEV_INTEGRITY)
11597ba1ba12SMartin K. Petersen 
1160b24498d4SJens Axboe #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
1161b24498d4SJens Axboe #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
11627ba1ba12SMartin K. Petersen 
11637ba1ba12SMartin K. Petersen struct blk_integrity_exchg {
11647ba1ba12SMartin K. Petersen 	void			*prot_buf;
11657ba1ba12SMartin K. Petersen 	void			*data_buf;
11667ba1ba12SMartin K. Petersen 	sector_t		sector;
11677ba1ba12SMartin K. Petersen 	unsigned int		data_size;
11687ba1ba12SMartin K. Petersen 	unsigned short		sector_size;
11697ba1ba12SMartin K. Petersen 	const char		*disk_name;
11707ba1ba12SMartin K. Petersen };
11717ba1ba12SMartin K. Petersen 
11727ba1ba12SMartin K. Petersen typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
11737ba1ba12SMartin K. Petersen typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
11747ba1ba12SMartin K. Petersen typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
11757ba1ba12SMartin K. Petersen typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
11767ba1ba12SMartin K. Petersen 
11777ba1ba12SMartin K. Petersen struct blk_integrity {
11787ba1ba12SMartin K. Petersen 	integrity_gen_fn	*generate_fn;
11797ba1ba12SMartin K. Petersen 	integrity_vrfy_fn	*verify_fn;
11807ba1ba12SMartin K. Petersen 	integrity_set_tag_fn	*set_tag_fn;
11817ba1ba12SMartin K. Petersen 	integrity_get_tag_fn	*get_tag_fn;
11827ba1ba12SMartin K. Petersen 
11837ba1ba12SMartin K. Petersen 	unsigned short		flags;
11847ba1ba12SMartin K. Petersen 	unsigned short		tuple_size;
11857ba1ba12SMartin K. Petersen 	unsigned short		sector_size;
11867ba1ba12SMartin K. Petersen 	unsigned short		tag_size;
11877ba1ba12SMartin K. Petersen 
11887ba1ba12SMartin K. Petersen 	const char		*name;
11897ba1ba12SMartin K. Petersen 
11907ba1ba12SMartin K. Petersen 	struct kobject		kobj;
11917ba1ba12SMartin K. Petersen };
11927ba1ba12SMartin K. Petersen 
11937ba1ba12SMartin K. Petersen extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
11947ba1ba12SMartin K. Petersen extern void blk_integrity_unregister(struct gendisk *);
1195ad7fce93SMartin K. Petersen extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
119613f05c8dSMartin K. Petersen extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
119713f05c8dSMartin K. Petersen 				   struct scatterlist *);
119813f05c8dSMartin K. Petersen extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
119913f05c8dSMartin K. Petersen extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
120013f05c8dSMartin K. Petersen 				  struct request *);
120113f05c8dSMartin K. Petersen extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
120213f05c8dSMartin K. Petersen 				   struct bio *);
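
/*
 * A minimal sketch of how a driver advertises an integrity profile
 * (hypothetical "my_dif" names, illustrative values only; disk is the
 * driver's struct gendisk):
 *
 *	static struct blk_integrity my_dif_profile = {
 *		.name		= "MY-DIF-TYPE1-CRC",
 *		.generate_fn	= my_dif_generate,
 *		.verify_fn	= my_dif_verify,
 *		.tuple_size	= 8,
 *		.tag_size	= 0,
 *	};
 *
 *	blk_integrity_register(disk, &my_dif_profile);
 *
 * The generate_fn/verify_fn callbacks receive a blk_integrity_exchg
 * describing the data and protection buffers to operate on.
 */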
12037ba1ba12SMartin K. Petersen 
1204b04accc4SJens Axboe static inline
1205b04accc4SJens Axboe struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1206b04accc4SJens Axboe {
1207b04accc4SJens Axboe 	return bdev->bd_disk->integrity;
1208b04accc4SJens Axboe }
1209b04accc4SJens Axboe 
1210b02739b0SMartin K. Petersen static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1211b02739b0SMartin K. Petersen {
1212b02739b0SMartin K. Petersen 	return disk->integrity;
1213b02739b0SMartin K. Petersen }
1214b02739b0SMartin K. Petersen 
12157ba1ba12SMartin K. Petersen static inline int blk_integrity_rq(struct request *rq)
12167ba1ba12SMartin K. Petersen {
1217d442cc44SMartin K. Petersen 	if (rq->bio == NULL)
1218d442cc44SMartin K. Petersen 		return 0;
1219d442cc44SMartin K. Petersen 
12207ba1ba12SMartin K. Petersen 	return bio_integrity(rq->bio);
12217ba1ba12SMartin K. Petersen }
12227ba1ba12SMartin K. Petersen 
122313f05c8dSMartin K. Petersen static inline void blk_queue_max_integrity_segments(struct request_queue *q,
122413f05c8dSMartin K. Petersen 						    unsigned int segs)
122513f05c8dSMartin K. Petersen {
122613f05c8dSMartin K. Petersen 	q->limits.max_integrity_segments = segs;
122713f05c8dSMartin K. Petersen }
122813f05c8dSMartin K. Petersen 
122913f05c8dSMartin K. Petersen static inline unsigned short
123013f05c8dSMartin K. Petersen queue_max_integrity_segments(struct request_queue *q)
123113f05c8dSMartin K. Petersen {
123213f05c8dSMartin K. Petersen 	return q->limits.max_integrity_segments;
123313f05c8dSMartin K. Petersen }
123413f05c8dSMartin K. Petersen 
12357ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */
12367ba1ba12SMartin K. Petersen 
12377ba1ba12SMartin K. Petersen #define blk_integrity_rq(rq)			(0)
123813f05c8dSMartin K. Petersen #define blk_rq_count_integrity_sg(a, b)		(0)
123913f05c8dSMartin K. Petersen #define blk_rq_map_integrity_sg(a, b, c)	(0)
1240b04accc4SJens Axboe #define bdev_get_integrity(a)			(0)
1241b02739b0SMartin K. Petersen #define blk_get_integrity(a)			(0)
12427ba1ba12SMartin K. Petersen #define blk_integrity_compare(a, b)		(0)
12437ba1ba12SMartin K. Petersen #define blk_integrity_register(a, b)		(0)
12447ba1ba12SMartin K. Petersen #define blk_integrity_unregister(a)		do { } while (0)
124513f05c8dSMartin K. Petersen #define blk_queue_max_integrity_segments(a, b)	do { } while (0)
124613f05c8dSMartin K. Petersen #define queue_max_integrity_segments(a)		(0)
124713f05c8dSMartin K. Petersen #define blk_integrity_merge_rq(a, b, c)		(0)
124813f05c8dSMartin K. Petersen #define blk_integrity_merge_bio(a, b, c)	(0)
12497ba1ba12SMartin K. Petersen 
12507ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */
12517ba1ba12SMartin K. Petersen 
125208f85851SAl Viro struct block_device_operations {
1253d4430d62SAl Viro 	int (*open) (struct block_device *, fmode_t);
1254d4430d62SAl Viro 	int (*release) (struct gendisk *, fmode_t);
1255d4430d62SAl Viro 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1256d4430d62SAl Viro 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
125708f85851SAl Viro 	int (*direct_access) (struct block_device *, sector_t,
125808f85851SAl Viro 						void **, unsigned long *);
125908f85851SAl Viro 	int (*media_changed) (struct gendisk *);
1260c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
126108f85851SAl Viro 	int (*revalidate_disk) (struct gendisk *);
126208f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1263b3a27d05SNitin Gupta 	/* this callback is called with swap_lock and sometimes the page table lock held */
1264b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
126508f85851SAl Viro 	struct module *owner;
126608f85851SAl Viro };
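
/*
 * A minimal sketch of a driver wiring up these operations (hypothetical
 * "my_blk" names, geometry values purely illustrative):
 *
 *	static int my_blk_getgeo(struct block_device *bdev,
 *				 struct hd_geometry *geo)
 *	{
 *		geo->heads = 4;
 *		geo->sectors = 16;
 *		geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
 *		return 0;
 *	}
 *
 *	static const struct block_device_operations my_blk_fops = {
 *		.owner	= THIS_MODULE,
 *		.getgeo	= my_blk_getgeo,
 *	};
 *
 * The table is hooked up via disk->fops = &my_blk_fops before add_disk().
 */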
126708f85851SAl Viro 
1268633a08b8SAl Viro extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1269633a08b8SAl Viro 				 unsigned long);
12709361401eSDavid Howells #else /* CONFIG_BLOCK */
12719361401eSDavid Howells /*
12729361401eSDavid Howells  * stubs for when the block layer is configured out
12739361401eSDavid Howells  */
12749361401eSDavid Howells #define buffer_heads_over_limit 0
12759361401eSDavid Howells 
12769361401eSDavid Howells static inline long nr_blockdev_pages(void)
12779361401eSDavid Howells {
12789361401eSDavid Howells 	return 0;
12799361401eSDavid Howells }
12809361401eSDavid Howells 
12819361401eSDavid Howells #endif /* CONFIG_BLOCK */
12829361401eSDavid Howells 
12831da177e4SLinus Torvalds #endif
1284