#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

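/*
 * Illustrative sketch (not part of this header's API): block core indexes
 * the per-class arrays above with BLK_RW_SYNC/BLK_RW_ASYNC from
 * <linux/backing-dev.h>.  rl_sync_starved() is a hypothetical helper,
 * shown only to make the indexing convention concrete:
 *
 *	static bool rl_sync_starved(struct request_list *rl)
 *	{
 *		return rl->starved[BLK_RW_SYNC] != 0;
 *	}
 */
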
/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. This really doesn't belong here; ide should
	 * use REQ_TYPE_SPECIAL and rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is.
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

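/*
 * Sketch of how a driver typically consumes cmd_type; my_fs_rw() and
 * my_send_cdb() are hypothetical helpers:
 *
 *	switch (rq->cmd_type) {
 *	case REQ_TYPE_FS:
 *		my_fs_rw(rq);			// normal read/write
 *		break;
 *	case REQ_TYPE_BLOCK_PC:
 *		my_send_cdb(rq->cmd, rq->cmd_len);
 *		break;
 *	default:
 *		break;
 *	}
 */
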
#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
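
/*
 * A merge_bvec_fn returns how many bytes of the passed bio_vec may be
 * appended to the bio described by bvec_merge_data; returning less than
 * bvec->bv_len rejects the merge.  A permissive sketch (my_merge_bvec is
 * hypothetical):
 *
 *	static int my_merge_bvec(struct request_queue *q,
 *				 struct bvec_merge_data *bvd,
 *				 struct bio_vec *bvec)
 *	{
 *		return bvec->bv_len;	// accept the whole segment
 *	}
 */
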
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

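/*
 * Timeout handler sketch (my_timed_out and my_device_busy are
 * hypothetical): a driver that only needs more time re-arms the timer;
 * otherwise it tells the block layer the outcome:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		if (my_device_busy(rq->q))
 *			return BLK_EH_RESET_TIMER;	// keep waiting
 *		return BLK_EH_HANDLED;	// treat the request as completed
 *	}
 */
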
enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};

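/*
 * Drivers normally do not poke a queue_limits struct directly; they use
 * the blk_queue_* setters declared later in this header.  A minimal
 * sketch for a hypothetical driver's init path:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 255);
 *	blk_queue_max_segments(q, 32);
 */
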
struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

	int			bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct list_head	all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO     10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   11	/* supports request stacking */
#define QUEUE_FLAG_NONROT      12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     13	/* do IO stats */
#define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

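/*
 * Usage sketch: the non-"_unlocked" helpers above must run under
 * ->queue_lock, e.g.:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 */
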
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_pm_request(rq)	\
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

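/*
 * prep_rq_fn sketch (my_prep_rq and my_build_cmd are hypothetical): build
 * the device command and defer on a transient resource shortage:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_build_cmd(rq))
 *			return BLKPREP_DEFER;	// retry later
 *		return BLKPREP_OK;
 *	}
 */
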
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

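/*
 * Iteration sketch: walking every segment of a request, e.g. for a
 * PIO-style driver (my_pio_out() is hypothetical):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_pio_out(page_address(bvec->bv_page) + bvec->bv_offset,
 *			   bvec->bv_len);
 */
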
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

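/*
 * Accessor sketch: logging a request's progress from a driver:
 *
 *	pr_debug("rq %p: sector %llu, %u bytes (%u sectors) remaining\n",
 *		 rq, (unsigned long long)blk_rq_pos(rq), blk_rq_bytes(rq),
 *		 blk_rq_sectors(rq));
 */
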
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return q->limits.max_discard_sectors;

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	return blk_queue_get_max_sectors(q, rq->cmd_flags);
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

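/*
 * request_fn sketch (my_request_fn and my_xfer are hypothetical): a
 * simple driver fetches started requests and completes them in-line.
 * request_fn runs with ->queue_lock held, hence the __blk_* completion:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			__blk_end_request_all(rq, my_xfer(rq));
 *	}
 */
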
8669934c8c0STejun Heo /*
8672e60e022STejun Heo  * Request completion related functions.
8682e60e022STejun Heo  *
8692e60e022STejun Heo  * blk_update_request() completes given number of bytes and updates
8702e60e022STejun Heo  * the request without completing it.
8712e60e022STejun Heo  *
872f06d9a2bSTejun Heo  * blk_end_request() and friends.  __blk_end_request() must be called
873f06d9a2bSTejun Heo  * with the request queue spinlock acquired.
8741da177e4SLinus Torvalds  *
8751da177e4SLinus Torvalds  * Several drivers define their own end_request and call
8763bcddeacSKiyoshi Ueda  * blk_end_request() for parts of the original function.
8773bcddeacSKiyoshi Ueda  * This prevents code duplication in drivers.
8781da177e4SLinus Torvalds  */
8792e60e022STejun Heo extern bool blk_update_request(struct request *rq, int error,
88022b13210SJens Axboe 			       unsigned int nr_bytes);
881b1f74493SFUJITA Tomonori extern bool blk_end_request(struct request *rq, int error,
882b1f74493SFUJITA Tomonori 			    unsigned int nr_bytes);
883b1f74493SFUJITA Tomonori extern void blk_end_request_all(struct request *rq, int error);
884b1f74493SFUJITA Tomonori extern bool blk_end_request_cur(struct request *rq, int error);
88580a761fdSTejun Heo extern bool blk_end_request_err(struct request *rq, int error);
886b1f74493SFUJITA Tomonori extern bool __blk_end_request(struct request *rq, int error,
887b1f74493SFUJITA Tomonori 			      unsigned int nr_bytes);
888b1f74493SFUJITA Tomonori extern void __blk_end_request_all(struct request *rq, int error);
889b1f74493SFUJITA Tomonori extern bool __blk_end_request_cur(struct request *rq, int error);
89080a761fdSTejun Heo extern bool __blk_end_request_err(struct request *rq, int error);
8912e60e022STejun Heo 
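/*
 * Partial-completion sketch: complete only what the hardware finished;
 * blk_end_request() returns true while bytes remain (nr_done is
 * hypothetical):
 *
 *	if (blk_end_request(rq, 0, nr_done))
 *		return;		// request still has bytes pending
 */
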
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
			  struct scatterlist *sglist);
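
/*
 * DMA setup sketch: map a request to a scatterlist sized for the queue's
 * segment limit before programming the DMA engine (MY_NR_SEGS and the
 * surrounding driver context are hypothetical):
 *
 *	struct scatterlist sgl[MY_NR_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgl, MY_NR_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 */
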
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	unsigned long magic; /* detect uninitialized use-cases */
	struct list_head list; /* requests */
	struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}

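/*
 * Plugging sketch from the submitter side: batch a burst of bios so the
 * block layer can merge and dispatch them together (the loop body is
 * hypothetical):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);	// flushes the plugged requests
 */
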
10181da177e4SLinus Torvalds /*
10191da177e4SLinus Torvalds  * tagged command queueing support
10201da177e4SLinus Torvalds  */
10214aff5e23SJens Axboe #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
1022165125e1SJens Axboe extern int blk_queue_start_tag(struct request_queue *, struct request *);
1023165125e1SJens Axboe extern struct request *blk_queue_find_tag(struct request_queue *, int);
1024165125e1SJens Axboe extern void blk_queue_end_tag(struct request_queue *, struct request *);
1025165125e1SJens Axboe extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
1026165125e1SJens Axboe extern void blk_queue_free_tags(struct request_queue *);
1027165125e1SJens Axboe extern int blk_queue_resize_tags(struct request_queue *, int);
1028165125e1SJens Axboe extern void blk_queue_invalidate_tags(struct request_queue *);
1029492dfb48SJames Bottomley extern struct blk_queue_tag *blk_init_tags(int);
1030492dfb48SJames Bottomley extern void blk_free_tags(struct blk_queue_tag *);
10311da177e4SLinus Torvalds 
1032f583f492SDavid C Somayajulu static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1033f583f492SDavid C Somayajulu 						int tag)
1034f583f492SDavid C Somayajulu {
1035f583f492SDavid C Somayajulu 	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1036f583f492SDavid C Somayajulu 		return NULL;
1037f583f492SDavid C Somayajulu 	return bqt->tag_index[tag];
1038f583f492SDavid C Somayajulu }
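
/*
 * Hedged sketch of the tag interface from a driver's point of view (the
 * depth of 64 and the error handling are illustrative, not prescriptive):
 *
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		goto fail;
 *
 *	In the request_fn, before issuing to hardware:
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;			(out of tags; rq stays queued)
 *	... issue rq->tag to the device ...
 *
 *	On completion, with the queue lock held:
 *
 *	blk_queue_end_tag(q, rq);
 */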
1039dd3932edSChristoph Hellwig 
1040dd3932edSChristoph Hellwig #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
1041dd3932edSChristoph Hellwig 
1042dd3932edSChristoph Hellwig extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
1043fbd9b09aSDmitry Monakhov extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1044fbd9b09aSDmitry Monakhov 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
10454363ac7cSMartin K. Petersen extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
10464363ac7cSMartin K. Petersen 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
10473f14d792SDmitry Monakhov extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1048dd3932edSChristoph Hellwig 			sector_t nr_sects, gfp_t gfp_mask);
10492cf6d26aSChristoph Hellwig static inline int sb_issue_discard(struct super_block *sb, sector_t block,
10502cf6d26aSChristoph Hellwig 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1051fb2dce86SDavid Woodhouse {
10522cf6d26aSChristoph Hellwig 	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
10532cf6d26aSChristoph Hellwig 				    nr_blocks << (sb->s_blocksize_bits - 9),
10542cf6d26aSChristoph Hellwig 				    gfp_mask, flags);
1055fb2dce86SDavid Woodhouse }
1056e6fa0be6SLukas Czerner static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1057a107e5a3STheodore Ts'o 		sector_t nr_blocks, gfp_t gfp_mask)
1058e6fa0be6SLukas Czerner {
1059e6fa0be6SLukas Czerner 	return blkdev_issue_zeroout(sb->s_bdev,
1060e6fa0be6SLukas Czerner 				    block << (sb->s_blocksize_bits - 9),
1061e6fa0be6SLukas Czerner 				    nr_blocks << (sb->s_blocksize_bits - 9),
1062a107e5a3STheodore Ts'o 				    gfp_mask);
1063e6fa0be6SLukas Czerner }
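
/*
 * Usage sketch (illustrative): discard a range of a device, optionally as a
 * secure discard. sb_issue_discard() above does the same after converting
 * filesystem blocks to 512-byte sectors via s_blocksize_bits - 9 (a 4K-block
 * filesystem shifts block numbers left by 3):
 *
 *	err = blkdev_issue_discard(bdev, start_sector, nr_sectors,
 *				   GFP_NOFS, BLKDEV_DISCARD_SECURE);
 */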
10641da177e4SLinus Torvalds 
1065018e0446SJens Axboe extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
10660b07de85SAdel Gadllah 
1067eb28d31bSMartin K. Petersen enum blk_default_limits {
1068eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENTS	= 128,
1069eb28d31bSMartin K. Petersen 	BLK_SAFE_MAX_SECTORS	= 255,
1070eb28d31bSMartin K. Petersen 	BLK_DEF_MAX_SECTORS	= 1024,
1071eb28d31bSMartin K. Petersen 	BLK_MAX_SEGMENT_SIZE	= 65536,
1072eb28d31bSMartin K. Petersen 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1073eb28d31bSMartin K. Petersen };
10740e435ac2SMilan Broz 
10751da177e4SLinus Torvalds #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
10761da177e4SLinus Torvalds 
1077ae03bf63SMartin K. Petersen static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1078ae03bf63SMartin K. Petersen {
1079025146e1SMartin K. Petersen 	return q->limits.bounce_pfn;
1080ae03bf63SMartin K. Petersen }
1081ae03bf63SMartin K. Petersen 
1082ae03bf63SMartin K. Petersen static inline unsigned long queue_segment_boundary(struct request_queue *q)
1083ae03bf63SMartin K. Petersen {
1084025146e1SMartin K. Petersen 	return q->limits.seg_boundary_mask;
1085ae03bf63SMartin K. Petersen }
1086ae03bf63SMartin K. Petersen 
1087ae03bf63SMartin K. Petersen static inline unsigned int queue_max_sectors(struct request_queue *q)
1088ae03bf63SMartin K. Petersen {
1089025146e1SMartin K. Petersen 	return q->limits.max_sectors;
1090ae03bf63SMartin K. Petersen }
1091ae03bf63SMartin K. Petersen 
1092ae03bf63SMartin K. Petersen static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1093ae03bf63SMartin K. Petersen {
1094025146e1SMartin K. Petersen 	return q->limits.max_hw_sectors;
1095ae03bf63SMartin K. Petersen }
1096ae03bf63SMartin K. Petersen 
10978a78362cSMartin K. Petersen static inline unsigned short queue_max_segments(struct request_queue *q)
1098ae03bf63SMartin K. Petersen {
10998a78362cSMartin K. Petersen 	return q->limits.max_segments;
1100ae03bf63SMartin K. Petersen }
1101ae03bf63SMartin K. Petersen 
1102ae03bf63SMartin K. Petersen static inline unsigned int queue_max_segment_size(struct request_queue *q)
1103ae03bf63SMartin K. Petersen {
1104025146e1SMartin K. Petersen 	return q->limits.max_segment_size;
1105ae03bf63SMartin K. Petersen }
1106ae03bf63SMartin K. Petersen 
1107e1defc4fSMartin K. Petersen static inline unsigned short queue_logical_block_size(struct request_queue *q)
11081da177e4SLinus Torvalds {
11091da177e4SLinus Torvalds 	int retval = 512;
11101da177e4SLinus Torvalds 
1111025146e1SMartin K. Petersen 	if (q && q->limits.logical_block_size)
1112025146e1SMartin K. Petersen 		retval = q->limits.logical_block_size;
11131da177e4SLinus Torvalds 
11141da177e4SLinus Torvalds 	return retval;
11151da177e4SLinus Torvalds }
11161da177e4SLinus Torvalds 
1117e1defc4fSMartin K. Petersen static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
11181da177e4SLinus Torvalds {
1119e1defc4fSMartin K. Petersen 	return queue_logical_block_size(bdev_get_queue(bdev));
11201da177e4SLinus Torvalds }
11211da177e4SLinus Torvalds 
1122c72758f3SMartin K. Petersen static inline unsigned int queue_physical_block_size(struct request_queue *q)
1123c72758f3SMartin K. Petersen {
1124c72758f3SMartin K. Petersen 	return q->limits.physical_block_size;
1125c72758f3SMartin K. Petersen }
1126c72758f3SMartin K. Petersen 
1127892b6f90SMartin K. Petersen static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1128ac481c20SMartin K. Petersen {
1129ac481c20SMartin K. Petersen 	return queue_physical_block_size(bdev_get_queue(bdev));
1130ac481c20SMartin K. Petersen }
1131ac481c20SMartin K. Petersen 
1132c72758f3SMartin K. Petersen static inline unsigned int queue_io_min(struct request_queue *q)
1133c72758f3SMartin K. Petersen {
1134c72758f3SMartin K. Petersen 	return q->limits.io_min;
1135c72758f3SMartin K. Petersen }
1136c72758f3SMartin K. Petersen 
1137ac481c20SMartin K. Petersen static inline int bdev_io_min(struct block_device *bdev)
1138ac481c20SMartin K. Petersen {
1139ac481c20SMartin K. Petersen 	return queue_io_min(bdev_get_queue(bdev));
1140ac481c20SMartin K. Petersen }
1141ac481c20SMartin K. Petersen 
1142c72758f3SMartin K. Petersen static inline unsigned int queue_io_opt(struct request_queue *q)
1143c72758f3SMartin K. Petersen {
1144c72758f3SMartin K. Petersen 	return q->limits.io_opt;
1145c72758f3SMartin K. Petersen }
1146c72758f3SMartin K. Petersen 
1147ac481c20SMartin K. Petersen static inline int bdev_io_opt(struct block_device *bdev)
1148ac481c20SMartin K. Petersen {
1149ac481c20SMartin K. Petersen 	return queue_io_opt(bdev_get_queue(bdev));
1150ac481c20SMartin K. Petersen }
1151ac481c20SMartin K. Petersen 
1152c72758f3SMartin K. Petersen static inline int queue_alignment_offset(struct request_queue *q)
1153c72758f3SMartin K. Petersen {
1154ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1155c72758f3SMartin K. Petersen 		return -1;
1156c72758f3SMartin K. Petersen 
1157c72758f3SMartin K. Petersen 	return q->limits.alignment_offset;
1158c72758f3SMartin K. Petersen }
1159c72758f3SMartin K. Petersen 
1160e03a72e1SMartin K. Petersen static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
116181744ee4SMartin K. Petersen {
116281744ee4SMartin K. Petersen 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1163e03a72e1SMartin K. Petersen 	unsigned int alignment = (sector << 9) & (granularity - 1);
116481744ee4SMartin K. Petersen 
1165e03a72e1SMartin K. Petersen 	return (granularity + lim->alignment_offset - alignment)
1166e03a72e1SMartin K. Petersen 		& (granularity - 1);
1167c72758f3SMartin K. Petersen }
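
/*
 * Worked example with assumed values: granularity = 4096 bytes (max of
 * physical_block_size and io_min), alignment_offset = 0, sector = 63.
 * Then (63 << 9) & 4095 = 3584, and the function returns
 * (4096 + 0 - 3584) & 4095 = 512: the byte offset from this sector to
 * the next properly aligned boundary.
 */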
1168c72758f3SMartin K. Petersen 
1169ac481c20SMartin K. Petersen static inline int bdev_alignment_offset(struct block_device *bdev)
1170ac481c20SMartin K. Petersen {
1171ac481c20SMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
1172ac481c20SMartin K. Petersen 
1173ac481c20SMartin K. Petersen 	if (q->limits.misaligned)
1174ac481c20SMartin K. Petersen 		return -1;
1175ac481c20SMartin K. Petersen 
1176ac481c20SMartin K. Petersen 	if (bdev != bdev->bd_contains)
1177ac481c20SMartin K. Petersen 		return bdev->bd_part->alignment_offset;
1178ac481c20SMartin K. Petersen 
1179ac481c20SMartin K. Petersen 	return q->limits.alignment_offset;
1180ac481c20SMartin K. Petersen }
1181ac481c20SMartin K. Petersen 
118286b37281SMartin K. Petersen static inline int queue_discard_alignment(struct request_queue *q)
118386b37281SMartin K. Petersen {
118486b37281SMartin K. Petersen 	if (q->limits.discard_misaligned)
118586b37281SMartin K. Petersen 		return -1;
118686b37281SMartin K. Petersen 
118786b37281SMartin K. Petersen 	return q->limits.discard_alignment;
118886b37281SMartin K. Petersen }
118986b37281SMartin K. Petersen 
1190e03a72e1SMartin K. Petersen static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
119186b37281SMartin K. Petersen {
119259771079SLinus Torvalds 	unsigned int alignment, granularity, offset;
1193dd3d145dSMartin K. Petersen 
1194a934a00aSMartin K. Petersen 	if (!lim->max_discard_sectors)
1195a934a00aSMartin K. Petersen 		return 0;
1196a934a00aSMartin K. Petersen 
119759771079SLinus Torvalds 	/* These limits are stored in bytes; work in sectors */
119859771079SLinus Torvalds 	alignment = lim->discard_alignment >> 9;
119959771079SLinus Torvalds 	granularity = lim->discard_granularity >> 9;
120059771079SLinus Torvalds 	if (!granularity)
120159771079SLinus Torvalds 		return 0;
120259771079SLinus Torvalds 
120359771079SLinus Torvalds 	/* Offset of the partition start in 'granularity' sectors */
120459771079SLinus Torvalds 	offset = sector_div(sector, granularity);
120559771079SLinus Torvalds 
120659771079SLinus Torvalds 	/* Sectors from 'sector' up to the next discard-aligned boundary */
120759771079SLinus Torvalds 	offset = (granularity + alignment - offset) % granularity;
120859771079SLinus Torvalds 
120959771079SLinus Torvalds 	/* Convert back from sectors to bytes */
121059771079SLinus Torvalds 	return offset << 9;
121186b37281SMartin K. Petersen }
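
/*
 * Worked example with assumed values: discard_granularity = 1 MiB (2048
 * sectors), discard_alignment = 0, sector = 63. Then offset = 63 % 2048
 * = 63, so the result is (2048 + 0 - 63) % 2048 = 1985 sectors, returned
 * as 1985 << 9 = 1016320 bytes to the next discard-aligned boundary.
 */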
121286b37281SMartin K. Petersen 
1213c6e66634SPaolo Bonzini static inline int bdev_discard_alignment(struct block_device *bdev)
1214c6e66634SPaolo Bonzini {
1215c6e66634SPaolo Bonzini 	struct request_queue *q = bdev_get_queue(bdev);
1216c6e66634SPaolo Bonzini 
1217c6e66634SPaolo Bonzini 	if (bdev != bdev->bd_contains)
1218c6e66634SPaolo Bonzini 		return bdev->bd_part->discard_alignment;
1219c6e66634SPaolo Bonzini 
1220c6e66634SPaolo Bonzini 	return q->limits.discard_alignment;
1221c6e66634SPaolo Bonzini }
1222c6e66634SPaolo Bonzini 
122398262f27SMartin K. Petersen static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
122498262f27SMartin K. Petersen {
1225a934a00aSMartin K. Petersen 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
122698262f27SMartin K. Petersen 		return 1;
122798262f27SMartin K. Petersen 
122898262f27SMartin K. Petersen 	return 0;
122998262f27SMartin K. Petersen }
123098262f27SMartin K. Petersen 
123198262f27SMartin K. Petersen static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
123298262f27SMartin K. Petersen {
123398262f27SMartin K. Petersen 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
123498262f27SMartin K. Petersen }
123598262f27SMartin K. Petersen 
12364363ac7cSMartin K. Petersen static inline unsigned int bdev_write_same(struct block_device *bdev)
12374363ac7cSMartin K. Petersen {
12384363ac7cSMartin K. Petersen 	struct request_queue *q = bdev_get_queue(bdev);
12394363ac7cSMartin K. Petersen 
12404363ac7cSMartin K. Petersen 	if (q)
12414363ac7cSMartin K. Petersen 		return q->limits.max_write_same_sectors;
12424363ac7cSMartin K. Petersen 
12434363ac7cSMartin K. Petersen 	return 0;
12444363ac7cSMartin K. Petersen }
12454363ac7cSMartin K. Petersen 
1246165125e1SJens Axboe static inline int queue_dma_alignment(struct request_queue *q)
12471da177e4SLinus Torvalds {
1248482eb689SPete Wyckoff 	return q ? q->dma_alignment : 511;
12491da177e4SLinus Torvalds }
12501da177e4SLinus Torvalds 
125114417799SNamhyung Kim static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
125287904074SFUJITA Tomonori 				 unsigned int len)
125387904074SFUJITA Tomonori {
125487904074SFUJITA Tomonori 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
125514417799SNamhyung Kim 	return !(addr & alignment) && !(len & alignment);
125687904074SFUJITA Tomonori }
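
/*
 * Example with assumed values: with the default dma_alignment of 511 and a
 * zero dma_pad_mask, a buffer at address 0x1000 of length 1024 is aligned
 * ((0x1000 & 511) == 0 and (1024 & 511) == 0), while address 0x1001 is not,
 * and callers such as blk_rq_map_kern then fall back to a copy.
 */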
125787904074SFUJITA Tomonori 
12581da177e4SLinus Torvalds /* assumes size > 256; e.g. blksize_bits(512) == 9, blksize_bits(4096) == 12 */
12591da177e4SLinus Torvalds static inline unsigned int blksize_bits(unsigned int size)
12601da177e4SLinus Torvalds {
12611da177e4SLinus Torvalds 	unsigned int bits = 8;
12621da177e4SLinus Torvalds 	do {
12631da177e4SLinus Torvalds 		bits++;
12641da177e4SLinus Torvalds 		size >>= 1;
12651da177e4SLinus Torvalds 	} while (size > 256);
12661da177e4SLinus Torvalds 	return bits;
12671da177e4SLinus Torvalds }
12681da177e4SLinus Torvalds 
12692befb9e3SAdrian Bunk static inline unsigned int block_size(struct block_device *bdev)
12701da177e4SLinus Torvalds {
12711da177e4SLinus Torvalds 	return bdev->bd_block_size;
12721da177e4SLinus Torvalds }
12731da177e4SLinus Torvalds 
1274f3876930SShaohua Li static inline bool queue_flush_queueable(struct request_queue *q)
1275f3876930SShaohua Li {
1276f3876930SShaohua Li 	return !q->flush_not_queueable;
1277f3876930SShaohua Li }
1278f3876930SShaohua Li 
12791da177e4SLinus Torvalds typedef struct {struct page *v;} Sector;
12801da177e4SLinus Torvalds 
12811da177e4SLinus Torvalds unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
12821da177e4SLinus Torvalds 
12831da177e4SLinus Torvalds static inline void put_dev_sector(Sector p)
12841da177e4SLinus Torvalds {
12851da177e4SLinus Torvalds 	page_cache_release(p.v);
12861da177e4SLinus Torvalds }
12871da177e4SLinus Torvalds 
12881da177e4SLinus Torvalds struct work_struct;
128918887ad9SJens Axboe int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
12901da177e4SLinus Torvalds 
12919195291eSDivyesh Shah #ifdef CONFIG_BLK_CGROUP
129228f4197eSJens Axboe /*
129328f4197eSJens Axboe  * This should not be using sched_clock(). A real patch is in progress
129428f4197eSJens Axboe  * to fix this up; until that is in place, we need to disable preemption
129528f4197eSJens Axboe  * around sched_clock() in this function and in set_io_start_time_ns().
129628f4197eSJens Axboe  */
12979195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req)
12989195291eSDivyesh Shah {
129928f4197eSJens Axboe 	preempt_disable();
13009195291eSDivyesh Shah 	req->start_time_ns = sched_clock();
130128f4197eSJens Axboe 	preempt_enable();
13029195291eSDivyesh Shah }
13039195291eSDivyesh Shah 
13049195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req)
13059195291eSDivyesh Shah {
130628f4197eSJens Axboe 	preempt_disable();
13079195291eSDivyesh Shah 	req->io_start_time_ns = sched_clock();
130828f4197eSJens Axboe 	preempt_enable();
13099195291eSDivyesh Shah }
131084c124daSDivyesh Shah 
131184c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
131284c124daSDivyesh Shah {
131384c124daSDivyesh Shah 	return req->start_time_ns;
131484c124daSDivyesh Shah }
131584c124daSDivyesh Shah 
131684c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
131784c124daSDivyesh Shah {
131884c124daSDivyesh Shah 	return req->io_start_time_ns;
131984c124daSDivyesh Shah }
13209195291eSDivyesh Shah #else
13219195291eSDivyesh Shah static inline void set_start_time_ns(struct request *req) {}
13229195291eSDivyesh Shah static inline void set_io_start_time_ns(struct request *req) {}
132384c124daSDivyesh Shah static inline uint64_t rq_start_time_ns(struct request *req)
132484c124daSDivyesh Shah {
132584c124daSDivyesh Shah 	return 0;
132684c124daSDivyesh Shah }
132784c124daSDivyesh Shah static inline uint64_t rq_io_start_time_ns(struct request *req)
132884c124daSDivyesh Shah {
132984c124daSDivyesh Shah 	return 0;
133084c124daSDivyesh Shah }
13319195291eSDivyesh Shah #endif
13329195291eSDivyesh Shah 
13331da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV(major,minor) \
13341da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
13351da177e4SLinus Torvalds #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
13361da177e4SLinus Torvalds 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
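
/*
 * Example (the major/minor values are illustrative): a driver can declare
 * aliases so userspace can autoload it by device number:
 *
 *	MODULE_ALIAS_BLOCKDEV(8, 0);
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(8);
 */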
13371da177e4SLinus Torvalds 
13387ba1ba12SMartin K. Petersen #if defined(CONFIG_BLK_DEV_INTEGRITY)
13397ba1ba12SMartin K. Petersen 
1340b24498d4SJens Axboe #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
1341b24498d4SJens Axboe #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
13427ba1ba12SMartin K. Petersen 
13437ba1ba12SMartin K. Petersen struct blk_integrity_exchg {
13447ba1ba12SMartin K. Petersen 	void			*prot_buf;
13457ba1ba12SMartin K. Petersen 	void			*data_buf;
13467ba1ba12SMartin K. Petersen 	sector_t		sector;
13477ba1ba12SMartin K. Petersen 	unsigned int		data_size;
13487ba1ba12SMartin K. Petersen 	unsigned short		sector_size;
13497ba1ba12SMartin K. Petersen 	const char		*disk_name;
13507ba1ba12SMartin K. Petersen };
13517ba1ba12SMartin K. Petersen 
13527ba1ba12SMartin K. Petersen typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
13537ba1ba12SMartin K. Petersen typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
13547ba1ba12SMartin K. Petersen typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
13557ba1ba12SMartin K. Petersen typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
13567ba1ba12SMartin K. Petersen 
13577ba1ba12SMartin K. Petersen struct blk_integrity {
13587ba1ba12SMartin K. Petersen 	integrity_gen_fn	*generate_fn;
13597ba1ba12SMartin K. Petersen 	integrity_vrfy_fn	*verify_fn;
13607ba1ba12SMartin K. Petersen 	integrity_set_tag_fn	*set_tag_fn;
13617ba1ba12SMartin K. Petersen 	integrity_get_tag_fn	*get_tag_fn;
13627ba1ba12SMartin K. Petersen 
13637ba1ba12SMartin K. Petersen 	unsigned short		flags;
13647ba1ba12SMartin K. Petersen 	unsigned short		tuple_size;
13657ba1ba12SMartin K. Petersen 	unsigned short		sector_size;
13667ba1ba12SMartin K. Petersen 	unsigned short		tag_size;
13677ba1ba12SMartin K. Petersen 
13687ba1ba12SMartin K. Petersen 	const char		*name;
13697ba1ba12SMartin K. Petersen 
13707ba1ba12SMartin K. Petersen 	struct kobject		kobj;
13717ba1ba12SMartin K. Petersen };
13727ba1ba12SMartin K. Petersen 
1373a63a5cf8SMike Snitzer extern bool blk_integrity_is_initialized(struct gendisk *);
13747ba1ba12SMartin K. Petersen extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
13757ba1ba12SMartin K. Petersen extern void blk_integrity_unregister(struct gendisk *);
1376ad7fce93SMartin K. Petersen extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
137713f05c8dSMartin K. Petersen extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
137813f05c8dSMartin K. Petersen 				   struct scatterlist *);
137913f05c8dSMartin K. Petersen extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
138013f05c8dSMartin K. Petersen extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
138113f05c8dSMartin K. Petersen 				  struct request *);
138213f05c8dSMartin K. Petersen extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
138313f05c8dSMartin K. Petersen 				   struct bio *);
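
/*
 * Registration sketch: the "foo" names are hypothetical, and the tuple_size
 * of 8 merely mirrors a DIF tuple for illustration:
 *
 *	static struct blk_integrity foo_integrity = {
 *		.name		= "FOO-DIF-TYPE1-CRC",
 *		.generate_fn	= foo_generate_fn,
 *		.verify_fn	= foo_verify_fn,
 *		.tuple_size	= 8,
 *		.tag_size	= 0,
 *	};
 *
 *	blk_integrity_register(disk, &foo_integrity);
 */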
13847ba1ba12SMartin K. Petersen 
1385b04accc4SJens Axboe static inline
1386b04accc4SJens Axboe struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1387b04accc4SJens Axboe {
1388b04accc4SJens Axboe 	return bdev->bd_disk->integrity;
1389b04accc4SJens Axboe }
1390b04accc4SJens Axboe 
1391b02739b0SMartin K. Petersen static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1392b02739b0SMartin K. Petersen {
1393b02739b0SMartin K. Petersen 	return disk->integrity;
1394b02739b0SMartin K. Petersen }
1395b02739b0SMartin K. Petersen 
13967ba1ba12SMartin K. Petersen static inline int blk_integrity_rq(struct request *rq)
13977ba1ba12SMartin K. Petersen {
1398d442cc44SMartin K. Petersen 	if (rq->bio == NULL)
1399d442cc44SMartin K. Petersen 		return 0;
1400d442cc44SMartin K. Petersen 
14017ba1ba12SMartin K. Petersen 	return bio_integrity(rq->bio);
14027ba1ba12SMartin K. Petersen }
14037ba1ba12SMartin K. Petersen 
140413f05c8dSMartin K. Petersen static inline void blk_queue_max_integrity_segments(struct request_queue *q,
140513f05c8dSMartin K. Petersen 						    unsigned int segs)
140613f05c8dSMartin K. Petersen {
140713f05c8dSMartin K. Petersen 	q->limits.max_integrity_segments = segs;
140813f05c8dSMartin K. Petersen }
140913f05c8dSMartin K. Petersen 
141013f05c8dSMartin K. Petersen static inline unsigned short
141113f05c8dSMartin K. Petersen queue_max_integrity_segments(struct request_queue *q)
141213f05c8dSMartin K. Petersen {
141313f05c8dSMartin K. Petersen 	return q->limits.max_integrity_segments;
141413f05c8dSMartin K. Petersen }
141513f05c8dSMartin K. Petersen 
14167ba1ba12SMartin K. Petersen #else /* CONFIG_BLK_DEV_INTEGRITY */
14177ba1ba12SMartin K. Petersen 
1418fd83240aSStephen Rothwell struct bio;
1419fd83240aSStephen Rothwell struct block_device;
1420fd83240aSStephen Rothwell struct gendisk;
1421fd83240aSStephen Rothwell struct blk_integrity;
1422fd83240aSStephen Rothwell 
1423fd83240aSStephen Rothwell static inline int blk_integrity_rq(struct request *rq)
1424fd83240aSStephen Rothwell {
1425fd83240aSStephen Rothwell 	return 0;
1426fd83240aSStephen Rothwell }
1427fd83240aSStephen Rothwell static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1428fd83240aSStephen Rothwell 					    struct bio *b)
1429fd83240aSStephen Rothwell {
1430fd83240aSStephen Rothwell 	return 0;
1431fd83240aSStephen Rothwell }
1432fd83240aSStephen Rothwell static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1433fd83240aSStephen Rothwell 					  struct bio *b,
1434fd83240aSStephen Rothwell 					  struct scatterlist *s)
1435fd83240aSStephen Rothwell {
1436fd83240aSStephen Rothwell 	return 0;
1437fd83240aSStephen Rothwell }
1438fd83240aSStephen Rothwell static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1439fd83240aSStephen Rothwell {
1440fd83240aSStephen Rothwell 	return NULL;
1441fd83240aSStephen Rothwell }
1442fd83240aSStephen Rothwell static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1443fd83240aSStephen Rothwell {
1444fd83240aSStephen Rothwell 	return NULL;
1445fd83240aSStephen Rothwell }
1446fd83240aSStephen Rothwell static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1447fd83240aSStephen Rothwell {
1448fd83240aSStephen Rothwell 	return 0;
1449fd83240aSStephen Rothwell }
1450fd83240aSStephen Rothwell static inline int blk_integrity_register(struct gendisk *d,
1451fd83240aSStephen Rothwell 					 struct blk_integrity *b)
1452fd83240aSStephen Rothwell {
1453fd83240aSStephen Rothwell 	return 0;
1454fd83240aSStephen Rothwell }
1455fd83240aSStephen Rothwell static inline void blk_integrity_unregister(struct gendisk *d)
1456fd83240aSStephen Rothwell {
1457fd83240aSStephen Rothwell }
1458fd83240aSStephen Rothwell static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1459fd83240aSStephen Rothwell 						    unsigned int segs)
1460fd83240aSStephen Rothwell {
1461fd83240aSStephen Rothwell }
1462fd83240aSStephen Rothwell static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1463fd83240aSStephen Rothwell {
1464fd83240aSStephen Rothwell 	return 0;
1465fd83240aSStephen Rothwell }
1466fd83240aSStephen Rothwell static inline int blk_integrity_merge_rq(struct request_queue *rq,
1467fd83240aSStephen Rothwell 					 struct request *r1,
1468fd83240aSStephen Rothwell 					 struct request *r2)
1469fd83240aSStephen Rothwell {
1470fd83240aSStephen Rothwell 	return 0;
1471fd83240aSStephen Rothwell }
1472fd83240aSStephen Rothwell static inline int blk_integrity_merge_bio(struct request_queue *rq,
1473fd83240aSStephen Rothwell 					  struct request *r,
1474fd83240aSStephen Rothwell 					  struct bio *b)
1475fd83240aSStephen Rothwell {
1476fd83240aSStephen Rothwell 	return 0;
1477fd83240aSStephen Rothwell }
1478fd83240aSStephen Rothwell static inline bool blk_integrity_is_initialized(struct gendisk *g)
1479fd83240aSStephen Rothwell {
1480fd83240aSStephen Rothwell 	return false;
1481fd83240aSStephen Rothwell }
14827ba1ba12SMartin K. Petersen 
14837ba1ba12SMartin K. Petersen #endif /* CONFIG_BLK_DEV_INTEGRITY */
14847ba1ba12SMartin K. Petersen 
148508f85851SAl Viro struct block_device_operations {
1486d4430d62SAl Viro 	int (*open) (struct block_device *, fmode_t);
1487*db2a144bSAl Viro 	void (*release) (struct gendisk *, fmode_t);
1488d4430d62SAl Viro 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1489d4430d62SAl Viro 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
149008f85851SAl Viro 	int (*direct_access) (struct block_device *, sector_t,
149108f85851SAl Viro 						void **, unsigned long *);
149277ea887eSTejun Heo 	unsigned int (*check_events) (struct gendisk *disk,
149377ea887eSTejun Heo 				      unsigned int clearing);
149477ea887eSTejun Heo 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
149508f85851SAl Viro 	int (*media_changed) (struct gendisk *);
1496c3e33e04STejun Heo 	void (*unlock_native_capacity) (struct gendisk *);
149708f85851SAl Viro 	int (*revalidate_disk) (struct gendisk *);
149808f85851SAl Viro 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1499b3a27d05SNitin Gupta 	/* this callback is with swap_lock and sometimes page table lock held */
1500b3a27d05SNitin Gupta 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
150108f85851SAl Viro 	struct module *owner;
150208f85851SAl Viro };
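
/*
 * Minimal sketch of a driver's ops table (the foo_* names are hypothetical);
 * only the callbacks a driver actually implements need to be filled in:
 *
 *	static const struct block_device_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.ioctl		= foo_ioctl,
 *		.getgeo		= foo_getgeo,
 *	};
 *
 *	disk->fops = &foo_fops;
 */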
150308f85851SAl Viro 
1504633a08b8SAl Viro extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1505633a08b8SAl Viro 				 unsigned long);
15069361401eSDavid Howells #else /* CONFIG_BLOCK */
15079361401eSDavid Howells /*
15089361401eSDavid Howells  * stubs for when the block layer is configured out
15099361401eSDavid Howells  */
15109361401eSDavid Howells #define buffer_heads_over_limit 0
15119361401eSDavid Howells 
15129361401eSDavid Howells static inline long nr_blockdev_pages(void)
15139361401eSDavid Howells {
15149361401eSDavid Howells 	return 0;
15159361401eSDavid Howells }
15169361401eSDavid Howells 
15171f940bdfSJens Axboe struct blk_plug {
15181f940bdfSJens Axboe };
15191f940bdfSJens Axboe 
15201f940bdfSJens Axboe static inline void blk_start_plug(struct blk_plug *plug)
152173c10101SJens Axboe {
152273c10101SJens Axboe }
152373c10101SJens Axboe 
15241f940bdfSJens Axboe static inline void blk_finish_plug(struct blk_plug *plug)
152573c10101SJens Axboe {
152673c10101SJens Axboe }
152773c10101SJens Axboe 
15281f940bdfSJens Axboe static inline void blk_flush_plug(struct task_struct *task)
152973c10101SJens Axboe {
153073c10101SJens Axboe }
153173c10101SJens Axboe 
1532a237c1c5SJens Axboe static inline void blk_schedule_flush_plug(struct task_struct *task)
1533a237c1c5SJens Axboe {
1534a237c1c5SJens Axboe }
1535a237c1c5SJens Axboe 
153773c10101SJens Axboe static inline bool blk_needs_flush_plug(struct task_struct *tsk)
153873c10101SJens Axboe {
153973c10101SJens Axboe 	return false;
154073c10101SJens Axboe }
154173c10101SJens Axboe 
15429361401eSDavid Howells #endif /* CONFIG_BLOCK */
15439361401eSDavid Howells 
15441da177e4SLinus Torvalds #endif /* _LINUX_BLKDEV_H */
1545