xref: /linux-6.15/include/linux/blkdev.h (revision 211a22ce)
1 #ifndef _LINUX_BLKDEV_H
2 #define _LINUX_BLKDEV_H
3 
4 #ifdef CONFIG_BLOCK
5 
6 #include <linux/sched.h>
7 #include <linux/major.h>
8 #include <linux/genhd.h>
9 #include <linux/list.h>
10 #include <linux/timer.h>
11 #include <linux/workqueue.h>
12 #include <linux/pagemap.h>
13 #include <linux/backing-dev.h>
14 #include <linux/wait.h>
15 #include <linux/mempool.h>
16 #include <linux/bio.h>
17 #include <linux/module.h>
18 #include <linux/stringify.h>
19 #include <linux/gfp.h>
20 #include <linux/bsg.h>
21 #include <linux/smp.h>
22 
23 #include <asm/scatterlist.h>
24 
25 struct scsi_ioctl_command;
26 
27 struct request_queue;
28 struct elevator_queue;
29 struct request_pm_state;
30 struct blk_trace;
31 struct request;
32 struct sg_io_hdr;
33 
34 #define BLKDEV_MIN_RQ	4
35 #define BLKDEV_MAX_RQ	128	/* Default maximum */
36 
37 struct request;
38 typedef void (rq_end_io_fn)(struct request *, int);
39 
40 struct request_list {
41 	int count[2];
42 	int starved[2];
43 	int elvpriv;
44 	mempool_t *rq_pool;
45 	wait_queue_head_t wait[2];
46 };
47 
48 /*
49  * request command types
50  */
51 enum rq_cmd_type_bits {
52 	REQ_TYPE_FS		= 1,	/* fs request */
53 	REQ_TYPE_BLOCK_PC,		/* scsi command */
54 	REQ_TYPE_SENSE,			/* sense request */
55 	REQ_TYPE_PM_SUSPEND,		/* suspend request */
56 	REQ_TYPE_PM_RESUME,		/* resume request */
57 	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
58 	REQ_TYPE_SPECIAL,		/* driver defined type */
59 	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
60 	/*
61 	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
62 	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
63 	 * private REQ_LB opcodes to differentiate what type of request this is
64 	 */
65 	REQ_TYPE_ATA_TASKFILE,
66 	REQ_TYPE_ATA_PC,
67 };
68 
69 /*
70  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
71  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
72  * SCSI cdb.
73  *
74  * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
75  * typically to differentiate REQ_TYPE_SPECIAL requests.
76  *
77  */
78 enum {
79 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
80 	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
81 	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
82 };
83 
84 /*
85  * request type modified bits. first two bits match BIO_RW* bits, important
86  */
87 enum rq_flag_bits {
88 	__REQ_RW,		/* not set, read. set, write */
89 	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
90 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
91 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
92 	__REQ_DISCARD,		/* request to discard sectors */
93 	__REQ_SORTED,		/* elevator knows about this request */
94 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
95 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
96 	__REQ_FUA,		/* forced unit access */
97 	__REQ_NOMERGE,		/* don't touch this for merging */
98 	__REQ_STARTED,		/* drive already may have started this one */
99 	__REQ_DONTPREP,		/* don't call prep for this one */
100 	__REQ_QUEUED,		/* uses queueing */
101 	__REQ_ELVPRIV,		/* elevator private data attached */
102 	__REQ_FAILED,		/* set if the request failed */
103 	__REQ_QUIET,		/* don't worry about errors */
104 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
105 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
106 	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
107 	__REQ_ALLOCED,		/* request came from our alloc pool */
108 	__REQ_RW_META,		/* metadata io request */
109 	__REQ_COPY_USER,	/* contains copies of user pages */
110 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
111 	__REQ_UNPLUG,		/* unplug queue on submission */
112 	__REQ_NR_BITS,		/* stops here */
113 };
114 
115 #define REQ_RW		(1 << __REQ_RW)
116 #define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
117 #define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
118 #define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
119 #define REQ_DISCARD	(1 << __REQ_DISCARD)
120 #define REQ_SORTED	(1 << __REQ_SORTED)
121 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
122 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
123 #define REQ_FUA		(1 << __REQ_FUA)
124 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
125 #define REQ_STARTED	(1 << __REQ_STARTED)
126 #define REQ_DONTPREP	(1 << __REQ_DONTPREP)
127 #define REQ_QUEUED	(1 << __REQ_QUEUED)
128 #define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
129 #define REQ_FAILED	(1 << __REQ_FAILED)
130 #define REQ_QUIET	(1 << __REQ_QUIET)
131 #define REQ_PREEMPT	(1 << __REQ_PREEMPT)
132 #define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
133 #define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
134 #define REQ_ALLOCED	(1 << __REQ_ALLOCED)
135 #define REQ_RW_META	(1 << __REQ_RW_META)
136 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
137 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
138 #define REQ_UNPLUG	(1 << __REQ_UNPLUG)
139 
140 #define BLK_MAX_CDB	16
141 
142 /*
143  * try to put the fields that are referenced together in the same cacheline.
144  * if you modify this structure, be sure to check block/blk-core.c:rq_init()
145  * as well!
146  */
147 struct request {
148 	struct list_head queuelist;
149 	struct call_single_data csd;
150 	int cpu;
151 
152 	struct request_queue *q;
153 
154 	unsigned int cmd_flags;
155 	enum rq_cmd_type_bits cmd_type;
156 	unsigned long atomic_flags;
157 
158 	/* Maintain bio traversal state for part by part I/O submission.
159 	 * hard_* are block layer internals, no driver should touch them!
160 	 */
161 
162 	sector_t sector;		/* next sector to submit */
163 	sector_t hard_sector;		/* next sector to complete */
164 	unsigned long nr_sectors;	/* no. of sectors left to submit */
165 	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
166 	/* no. of sectors left to submit in the current segment */
167 	unsigned int current_nr_sectors;
168 
169 	/* no. of sectors left to complete in the current segment */
170 	unsigned int hard_cur_sectors;
171 
172 	struct bio *bio;
173 	struct bio *biotail;
174 
175 	struct hlist_node hash;	/* merge hash */
176 	/*
177 	 * The rb_node is only used inside the io scheduler, requests
178 	 * are pruned when moved to the dispatch queue. So let the
179 	 * completion_data share space with the rb_node.
180 	 */
181 	union {
182 		struct rb_node rb_node;	/* sort/lookup */
183 		void *completion_data;
184 	};
185 
186 	/*
187 	 * two pointers are available for the IO schedulers, if they need
188 	 * more they have to dynamically allocate it.
189 	 */
190 	void *elevator_private;
191 	void *elevator_private2;
192 
193 	struct gendisk *rq_disk;
194 	unsigned long start_time;
195 
196 	/* Number of scatter-gather DMA addr+len pairs after
197 	 * physical address coalescing is performed.
198 	 */
199 	unsigned short nr_phys_segments;
200 
201 	unsigned short ioprio;
202 
203 	void *special;
204 	char *buffer;
205 
206 	int tag;
207 	int errors;
208 
209 	int ref_count;
210 
211 	/*
212 	 * when request is used as a packet command carrier
213 	 */
214 	unsigned short cmd_len;
215 	unsigned char __cmd[BLK_MAX_CDB];
216 	unsigned char *cmd;
217 
218 	unsigned int data_len;
219 	unsigned int extra_len;	/* length of alignment and padding */
220 	unsigned int sense_len;
221 	void *data;
222 	void *sense;
223 
224 	unsigned long deadline;
225 	struct list_head timeout_list;
226 	unsigned int timeout;
227 	int retries;
228 
229 	/*
230 	 * completion callback.
231 	 */
232 	rq_end_io_fn *end_io;
233 	void *end_io_data;
234 
235 	/* for bidi */
236 	struct request *next_rq;
237 };
238 
239 static inline unsigned short req_get_ioprio(struct request *req)
240 {
241 	return req->ioprio;
242 }
243 
244 /*
245  * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
246  * requests. Some step values could eventually be made generic.
247  */
248 struct request_pm_state
249 {
250 	/* PM state machine step value, currently driver specific */
251 	int	pm_step;
252 	/* requested PM state value (S1, S2, S3, S4, ...) */
253 	u32	pm_state;
254 	void*	data;		/* for driver use */
255 };
256 
257 #include <linux/elevator.h>
258 
259 typedef void (request_fn_proc) (struct request_queue *q);
260 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
261 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
262 typedef void (unplug_fn) (struct request_queue *);
263 typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
264 
265 struct bio_vec;
266 struct bvec_merge_data {
267 	struct block_device *bi_bdev;
268 	sector_t bi_sector;
269 	unsigned bi_size;
270 	unsigned long bi_rw;
271 };
272 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
273 			     struct bio_vec *);
274 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
275 typedef void (softirq_done_fn)(struct request *);
276 typedef int (dma_drain_needed_fn)(struct request *);
277 typedef int (lld_busy_fn) (struct request_queue *q);
278 
279 enum blk_eh_timer_return {
280 	BLK_EH_NOT_HANDLED,
281 	BLK_EH_HANDLED,
282 	BLK_EH_RESET_TIMER,
283 };
284 
285 typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
286 
287 enum blk_queue_state {
288 	Queue_down,
289 	Queue_up,
290 };
291 
292 struct blk_queue_tag {
293 	struct request **tag_index;	/* map of busy tags */
294 	unsigned long *tag_map;		/* bit map of free/busy tags */
295 	int busy;			/* current depth */
296 	int max_depth;			/* what we will send to device */
297 	int real_max_depth;		/* what the array can hold */
298 	atomic_t refcnt;		/* map can be shared */
299 };
300 
301 #define BLK_SCSI_MAX_CMDS	(256)
302 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
303 
304 struct blk_cmd_filter {
305 	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
306 	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
307 	struct kobject kobj;
308 };
309 
310 struct request_queue
311 {
312 	/*
313 	 * Together with queue_head for cacheline sharing
314 	 */
315 	struct list_head	queue_head;
316 	struct request		*last_merge;
317 	struct elevator_queue	*elevator;
318 
319 	/*
320 	 * the queue request freelist, one for reads and one for writes
321 	 */
322 	struct request_list	rq;
323 
324 	request_fn_proc		*request_fn;
325 	make_request_fn		*make_request_fn;
326 	prep_rq_fn		*prep_rq_fn;
327 	unplug_fn		*unplug_fn;
328 	prepare_discard_fn	*prepare_discard_fn;
329 	merge_bvec_fn		*merge_bvec_fn;
330 	prepare_flush_fn	*prepare_flush_fn;
331 	softirq_done_fn		*softirq_done_fn;
332 	rq_timed_out_fn		*rq_timed_out_fn;
333 	dma_drain_needed_fn	*dma_drain_needed;
334 	lld_busy_fn		*lld_busy_fn;
335 
336 	/*
337 	 * Dispatch queue sorting
338 	 */
339 	sector_t		end_sector;
340 	struct request		*boundary_rq;
341 
342 	/*
343 	 * Auto-unplugging state
344 	 */
345 	struct timer_list	unplug_timer;
346 	int			unplug_thresh;	/* After this many requests */
347 	unsigned long		unplug_delay;	/* After this many jiffies */
348 	struct work_struct	unplug_work;
349 
350 	struct backing_dev_info	backing_dev_info;
351 
352 	/*
353 	 * The queue owner gets to use this for whatever they like.
354 	 * ll_rw_blk doesn't touch it.
355 	 */
356 	void			*queuedata;
357 
358 	/*
359 	 * queue needs bounce pages for pages above this limit
360 	 */
361 	unsigned long		bounce_pfn;
362 	gfp_t			bounce_gfp;
363 
364 	/*
365 	 * various queue flags, see QUEUE_* below
366 	 */
367 	unsigned long		queue_flags;
368 
369 	/*
370 	 * protects queue structures from reentrancy. ->__queue_lock should
371 	 * _never_ be used directly, it is queue private. always use
372 	 * ->queue_lock.
373 	 */
374 	spinlock_t		__queue_lock;
375 	spinlock_t		*queue_lock;
376 
377 	/*
378 	 * queue kobject
379 	 */
380 	struct kobject kobj;
381 
382 	/*
383 	 * queue settings
384 	 */
385 	unsigned long		nr_requests;	/* Max # of requests */
386 	unsigned int		nr_congestion_on;
387 	unsigned int		nr_congestion_off;
388 	unsigned int		nr_batching;
389 
390 	unsigned int		max_sectors;
391 	unsigned int		max_hw_sectors;
392 	unsigned short		max_phys_segments;
393 	unsigned short		max_hw_segments;
394 	unsigned short		hardsect_size;
395 	unsigned int		max_segment_size;
396 
397 	unsigned long		seg_boundary_mask;
398 	void			*dma_drain_buffer;
399 	unsigned int		dma_drain_size;
400 	unsigned int		dma_pad_mask;
401 	unsigned int		dma_alignment;
402 
403 	struct blk_queue_tag	*queue_tags;
404 	struct list_head	tag_busy_list;
405 
406 	unsigned int		nr_sorted;
407 	unsigned int		in_flight;
408 
409 	unsigned int		rq_timeout;
410 	struct timer_list	timeout;
411 	struct list_head	timeout_list;
412 
413 	/*
414 	 * sg stuff
415 	 */
416 	unsigned int		sg_timeout;
417 	unsigned int		sg_reserved_size;
418 	int			node;
419 #ifdef CONFIG_BLK_DEV_IO_TRACE
420 	struct blk_trace	*blk_trace;
421 #endif
422 	/*
423 	 * reserved for flush operations
424 	 */
425 	unsigned int		ordered, next_ordered, ordseq;
426 	int			orderr, ordcolor;
427 	struct request		pre_flush_rq, bar_rq, post_flush_rq;
428 	struct request		*orig_bar_rq;
429 
430 	struct mutex		sysfs_lock;
431 
432 #if defined(CONFIG_BLK_DEV_BSG)
433 	struct bsg_class_device bsg_dev;
434 #endif
435 	struct blk_cmd_filter cmd_filter;
436 };
437 
438 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
439 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
440 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
441 #define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
442 #define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
443 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
444 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
445 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
446 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
447 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
448 #define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
449 #define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
450 #define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
451 #define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
452 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
453 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
454 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
455 
456 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
457 				 (1 << QUEUE_FLAG_CLUSTER) |		\
458 				 (1 << QUEUE_FLAG_STACKABLE))
459 
460 static inline int queue_is_locked(struct request_queue *q)
461 {
462 #ifdef CONFIG_SMP
463 	spinlock_t *lock = q->queue_lock;
464 	return lock && spin_is_locked(lock);
465 #else
466 	return 1;
467 #endif
468 }
469 
470 static inline void queue_flag_set_unlocked(unsigned int flag,
471 					   struct request_queue *q)
472 {
473 	__set_bit(flag, &q->queue_flags);
474 }
475 
476 static inline int queue_flag_test_and_clear(unsigned int flag,
477 					    struct request_queue *q)
478 {
479 	WARN_ON_ONCE(!queue_is_locked(q));
480 
481 	if (test_bit(flag, &q->queue_flags)) {
482 		__clear_bit(flag, &q->queue_flags);
483 		return 1;
484 	}
485 
486 	return 0;
487 }
488 
489 static inline int queue_flag_test_and_set(unsigned int flag,
490 					  struct request_queue *q)
491 {
492 	WARN_ON_ONCE(!queue_is_locked(q));
493 
494 	if (!test_bit(flag, &q->queue_flags)) {
495 		__set_bit(flag, &q->queue_flags);
496 		return 0;
497 	}
498 
499 	return 1;
500 }
501 
502 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
503 {
504 	WARN_ON_ONCE(!queue_is_locked(q));
505 	__set_bit(flag, &q->queue_flags);
506 }
507 
508 static inline void queue_flag_clear_unlocked(unsigned int flag,
509 					     struct request_queue *q)
510 {
511 	__clear_bit(flag, &q->queue_flags);
512 }
513 
514 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
515 {
516 	WARN_ON_ONCE(!queue_is_locked(q));
517 	__clear_bit(flag, &q->queue_flags);
518 }
519 
520 enum {
521 	/*
522 	 * Hardbarrier is supported with one of the following methods.
523 	 *
524 	 * NONE		: hardbarrier unsupported
525 	 * DRAIN	: ordering by draining is enough
526 	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
527 	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
528 	 * TAG		: ordering by tag is enough
529 	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
530 	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
531 	 */
532 	QUEUE_ORDERED_BY_DRAIN		= 0x01,
533 	QUEUE_ORDERED_BY_TAG		= 0x02,
534 	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
535 	QUEUE_ORDERED_DO_BAR		= 0x20,
536 	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
537 	QUEUE_ORDERED_DO_FUA		= 0x80,
538 
539 	QUEUE_ORDERED_NONE		= 0x00,
540 
541 	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
542 					  QUEUE_ORDERED_DO_BAR,
543 	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
544 					  QUEUE_ORDERED_DO_PREFLUSH |
545 					  QUEUE_ORDERED_DO_POSTFLUSH,
546 	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
547 					  QUEUE_ORDERED_DO_PREFLUSH |
548 					  QUEUE_ORDERED_DO_FUA,
549 
550 	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
551 					  QUEUE_ORDERED_DO_BAR,
552 	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
553 					  QUEUE_ORDERED_DO_PREFLUSH |
554 					  QUEUE_ORDERED_DO_POSTFLUSH,
555 	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
556 					  QUEUE_ORDERED_DO_PREFLUSH |
557 					  QUEUE_ORDERED_DO_FUA,
558 
559 	/*
560 	 * Ordered operation sequence
561 	 */
562 	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
563 	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
564 	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
565 	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
566 	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
567 	QUEUE_ORDSEQ_DONE	= 0x20,
568 };
569 
570 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
571 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
572 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
573 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
574 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
575 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
576 #define blk_queue_flushing(q)	((q)->ordseq)
577 #define blk_queue_stackable(q)	\
578 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
579 
580 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
581 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
582 #define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
583 #define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
584 
585 #define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
586 #define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
587 #define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
588 #define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
589 				 blk_failfast_transport(rq) ||	\
590 				 blk_failfast_driver(rq))
591 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
592 
593 #define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
594 
595 #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
596 #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
597 #define blk_pm_request(rq)	\
598 	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
599 
600 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
601 #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
602 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
603 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
604 #define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
605 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
606 /* rq->queuelist of dequeued request must be list_empty() */
607 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
608 
609 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
610 
611 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
612 
613 /*
614  * We regard a request as sync, if it's a READ or a SYNC write.
615  */
616 #define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
617 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
618 
619 static inline int blk_queue_full(struct request_queue *q, int rw)
620 {
621 	if (rw == READ)
622 		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
623 	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
624 }
625 
626 static inline void blk_set_queue_full(struct request_queue *q, int rw)
627 {
628 	if (rw == READ)
629 		queue_flag_set(QUEUE_FLAG_READFULL, q);
630 	else
631 		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
632 }
633 
634 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
635 {
636 	if (rw == READ)
637 		queue_flag_clear(QUEUE_FLAG_READFULL, q);
638 	else
639 		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
640 }
641 
642 
643 /*
644  * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
645  * it already be started by driver.
646  */
647 #define RQ_NOMERGE_FLAGS	\
648 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
649 #define rq_mergeable(rq)	\
650 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
651 	 (blk_discard_rq(rq) || blk_fs_request((rq))))
652 
653 /*
654  * q->prep_rq_fn return values
655  */
656 #define BLKPREP_OK		0	/* serve it */
657 #define BLKPREP_KILL		1	/* fatal error, kill */
658 #define BLKPREP_DEFER		2	/* leave on queue */
659 
660 extern unsigned long blk_max_low_pfn, blk_max_pfn;
661 
662 /*
663  * standard bounce addresses:
664  *
665  * BLK_BOUNCE_HIGH	: bounce all highmem pages
666  * BLK_BOUNCE_ANY	: don't bounce anything
667  * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
668  */
669 
670 #if BITS_PER_LONG == 32
671 #define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
672 #else
673 #define BLK_BOUNCE_HIGH		-1ULL
674 #endif
675 #define BLK_BOUNCE_ANY		(-1ULL)
676 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
677 
678 /*
679  * default timeout for SG_IO if none specified
680  */
681 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
682 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
683 
684 #ifdef CONFIG_BOUNCE
685 extern int init_emergency_isa_pool(void);
686 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
687 #else
688 static inline int init_emergency_isa_pool(void)
689 {
690 	return 0;
691 }
692 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
693 {
694 }
695 #endif /* CONFIG_MMU */
696 
697 struct rq_map_data {
698 	struct page **pages;
699 	int page_order;
700 	int nr_entries;
701 	unsigned long offset;
702 	int null_mapped;
703 };
704 
705 struct req_iterator {
706 	int i;
707 	struct bio *bio;
708 };
709 
710 /* This should not be used directly - use rq_for_each_segment */
711 #define __rq_for_each_bio(_bio, rq)	\
712 	if ((rq->bio))			\
713 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
714 
715 #define rq_for_each_segment(bvl, _rq, _iter)			\
716 	__rq_for_each_bio(_iter.bio, _rq)			\
717 		bio_for_each_segment(bvl, _iter.bio, _iter.i)
718 
719 #define rq_iter_last(rq, _iter)					\
720 		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
721 
722 extern int blk_register_queue(struct gendisk *disk);
723 extern void blk_unregister_queue(struct gendisk *disk);
724 extern void register_disk(struct gendisk *dev);
725 extern void generic_make_request(struct bio *bio);
726 extern void blk_rq_init(struct request_queue *q, struct request *rq);
727 extern void blk_put_request(struct request *);
728 extern void __blk_put_request(struct request_queue *, struct request *);
729 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
730 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
731 extern void blk_requeue_request(struct request_queue *, struct request *);
732 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
733 extern int blk_lld_busy(struct request_queue *q);
734 extern int blk_insert_cloned_request(struct request_queue *q,
735 				     struct request *rq);
736 extern void blk_plug_device(struct request_queue *);
737 extern void blk_plug_device_unlocked(struct request_queue *);
738 extern int blk_remove_plug(struct request_queue *);
739 extern void blk_recount_segments(struct request_queue *, struct bio *);
740 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
741 			  unsigned int, void __user *);
742 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
743 			 struct scsi_ioctl_command __user *);
744 
745 /*
746  * Temporary export, until SCSI gets fixed up.
747  */
748 extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
749 			     struct bio *bio);
750 
751 /*
752  * A queue has just exitted congestion.  Note this in the global counter of
753  * congested queues, and wake up anyone who was waiting for requests to be
754  * put back.
755  */
756 static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
757 {
758 	clear_bdi_congested(&q->backing_dev_info, rw);
759 }
760 
761 /*
762  * A queue has just entered congestion.  Flag that in the queue's VM-visible
763  * state flags and increment the global gounter of congested queues.
764  */
765 static inline void blk_set_queue_congested(struct request_queue *q, int rw)
766 {
767 	set_bdi_congested(&q->backing_dev_info, rw);
768 }
769 
770 extern void blk_start_queue(struct request_queue *q);
771 extern void blk_stop_queue(struct request_queue *q);
772 extern void blk_sync_queue(struct request_queue *q);
773 extern void __blk_stop_queue(struct request_queue *q);
774 extern void __blk_run_queue(struct request_queue *);
775 extern void blk_run_queue(struct request_queue *);
776 extern void blk_start_queueing(struct request_queue *);
777 extern int blk_rq_map_user(struct request_queue *, struct request *,
778 			   struct rq_map_data *, void __user *, unsigned long,
779 			   gfp_t);
780 extern int blk_rq_unmap_user(struct bio *);
781 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
782 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
783 			       struct rq_map_data *, struct sg_iovec *, int,
784 			       unsigned int, gfp_t);
785 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
786 			  struct request *, int);
787 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
788 				  struct request *, int, rq_end_io_fn *);
789 extern void blk_unplug(struct request_queue *q);
790 
791 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
792 {
793 	return bdev->bd_disk->queue;
794 }
795 
796 static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
797 				       struct page *page)
798 {
799 	if (bdi && bdi->unplug_io_fn)
800 		bdi->unplug_io_fn(bdi, page);
801 }
802 
803 static inline void blk_run_address_space(struct address_space *mapping)
804 {
805 	if (mapping)
806 		blk_run_backing_dev(mapping->backing_dev_info, NULL);
807 }
808 
809 extern void blkdev_dequeue_request(struct request *req);
810 
811 /*
812  * blk_end_request() and friends.
813  * __blk_end_request() and end_request() must be called with
814  * the request queue spinlock acquired.
815  *
816  * Several drivers define their own end_request and call
817  * blk_end_request() for parts of the original function.
818  * This prevents code duplication in drivers.
819  */
820 extern int blk_end_request(struct request *rq, int error,
821 				unsigned int nr_bytes);
822 extern int __blk_end_request(struct request *rq, int error,
823 				unsigned int nr_bytes);
824 extern int blk_end_bidi_request(struct request *rq, int error,
825 				unsigned int nr_bytes, unsigned int bidi_bytes);
826 extern void end_request(struct request *, int);
827 extern int blk_end_request_callback(struct request *rq, int error,
828 				unsigned int nr_bytes,
829 				int (drv_callback)(struct request *));
830 extern void blk_complete_request(struct request *);
831 extern void __blk_complete_request(struct request *);
832 extern void blk_abort_request(struct request *);
833 extern void blk_abort_queue(struct request_queue *);
834 extern void blk_update_request(struct request *rq, int error,
835 			       unsigned int nr_bytes);
836 
837 /*
838  * blk_end_request() takes bytes instead of sectors as a complete size.
839  * blk_rq_bytes() returns bytes left to complete in the entire request.
840  * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
841  */
842 extern unsigned int blk_rq_bytes(struct request *rq);
843 extern unsigned int blk_rq_cur_bytes(struct request *rq);
844 
845 /*
846  * Access functions for manipulating queue properties
847  */
848 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
849 					spinlock_t *lock, int node_id);
850 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
851 extern void blk_cleanup_queue(struct request_queue *);
852 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
853 extern void blk_queue_bounce_limit(struct request_queue *, u64);
854 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
855 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
856 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
857 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
858 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
859 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
860 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
861 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
862 extern int blk_queue_dma_drain(struct request_queue *q,
863 			       dma_drain_needed_fn *dma_drain_needed,
864 			       void *buf, unsigned int size);
865 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
866 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
867 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
868 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
869 extern void blk_queue_dma_alignment(struct request_queue *, int);
870 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
871 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
872 extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
873 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
874 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
875 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
876 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
877 extern bool blk_do_ordered(struct request_queue *, struct request **);
878 extern unsigned blk_ordered_cur_seq(struct request_queue *);
879 extern unsigned blk_ordered_req_seq(struct request *);
880 extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
881 
882 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
883 extern void blk_dump_rq_flags(struct request *, char *);
884 extern void generic_unplug_device(struct request_queue *);
885 extern long nr_blockdev_pages(void);
886 
887 int blk_get_queue(struct request_queue *);
888 struct request_queue *blk_alloc_queue(gfp_t);
889 struct request_queue *blk_alloc_queue_node(gfp_t, int);
890 extern void blk_put_queue(struct request_queue *);
891 
892 /*
893  * tag stuff
894  */
895 #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
896 extern int blk_queue_start_tag(struct request_queue *, struct request *);
897 extern struct request *blk_queue_find_tag(struct request_queue *, int);
898 extern void blk_queue_end_tag(struct request_queue *, struct request *);
899 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
900 extern void blk_queue_free_tags(struct request_queue *);
901 extern int blk_queue_resize_tags(struct request_queue *, int);
902 extern void blk_queue_invalidate_tags(struct request_queue *);
903 extern struct blk_queue_tag *blk_init_tags(int);
904 extern void blk_free_tags(struct blk_queue_tag *);
905 
906 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
907 						int tag)
908 {
909 	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
910 		return NULL;
911 	return bqt->tag_index[tag];
912 }
913 
914 extern int blkdev_issue_flush(struct block_device *, sector_t *);
915 extern int blkdev_issue_discard(struct block_device *,
916 				sector_t sector, sector_t nr_sects, gfp_t);
917 
918 static inline int sb_issue_discard(struct super_block *sb,
919 				   sector_t block, sector_t nr_blocks)
920 {
921 	block <<= (sb->s_blocksize_bits - 9);
922 	nr_blocks <<= (sb->s_blocksize_bits - 9);
923 	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
924 }
925 
926 /*
927 * command filter functions
928 */
929 extern int blk_verify_command(struct blk_cmd_filter *filter,
930 			      unsigned char *cmd, fmode_t has_write_perm);
931 extern void blk_unregister_filter(struct gendisk *disk);
932 extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
933 
934 #define MAX_PHYS_SEGMENTS 128
935 #define MAX_HW_SEGMENTS 128
936 #define SAFE_MAX_SECTORS 255
937 #define BLK_DEF_MAX_SECTORS 1024
938 
939 #define MAX_SEGMENT_SIZE	65536
940 
941 #define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL
942 
943 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
944 
945 static inline int queue_hardsect_size(struct request_queue *q)
946 {
947 	int retval = 512;
948 
949 	if (q && q->hardsect_size)
950 		retval = q->hardsect_size;
951 
952 	return retval;
953 }
954 
955 static inline int bdev_hardsect_size(struct block_device *bdev)
956 {
957 	return queue_hardsect_size(bdev_get_queue(bdev));
958 }
959 
960 static inline int queue_dma_alignment(struct request_queue *q)
961 {
962 	return q ? q->dma_alignment : 511;
963 }
964 
965 static inline int blk_rq_aligned(struct request_queue *q, void *addr,
966 				 unsigned int len)
967 {
968 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
969 	return !((unsigned long)addr & alignment) && !(len & alignment);
970 }
971 
972 /* assumes size > 256 */
973 static inline unsigned int blksize_bits(unsigned int size)
974 {
975 	unsigned int bits = 8;
976 	do {
977 		bits++;
978 		size >>= 1;
979 	} while (size > 256);
980 	return bits;
981 }
982 
983 static inline unsigned int block_size(struct block_device *bdev)
984 {
985 	return bdev->bd_block_size;
986 }
987 
988 typedef struct {struct page *v;} Sector;
989 
990 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
991 
992 static inline void put_dev_sector(Sector p)
993 {
994 	page_cache_release(p.v);
995 }
996 
997 struct work_struct;
998 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
999 
1000 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1001 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1002 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1003 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
1004 
1005 #if defined(CONFIG_BLK_DEV_INTEGRITY)
1006 
1007 #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
1008 #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
1009 
1010 struct blk_integrity_exchg {
1011 	void			*prot_buf;
1012 	void			*data_buf;
1013 	sector_t		sector;
1014 	unsigned int		data_size;
1015 	unsigned short		sector_size;
1016 	const char		*disk_name;
1017 };
1018 
1019 typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
1020 typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
1021 typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
1022 typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
1023 
1024 struct blk_integrity {
1025 	integrity_gen_fn	*generate_fn;
1026 	integrity_vrfy_fn	*verify_fn;
1027 	integrity_set_tag_fn	*set_tag_fn;
1028 	integrity_get_tag_fn	*get_tag_fn;
1029 
1030 	unsigned short		flags;
1031 	unsigned short		tuple_size;
1032 	unsigned short		sector_size;
1033 	unsigned short		tag_size;
1034 
1035 	const char		*name;
1036 
1037 	struct kobject		kobj;
1038 };
1039 
1040 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
1041 extern void blk_integrity_unregister(struct gendisk *);
1042 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1043 extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
1044 extern int blk_rq_count_integrity_sg(struct request *);
1045 
1046 static inline
1047 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1048 {
1049 	return bdev->bd_disk->integrity;
1050 }
1051 
1052 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1053 {
1054 	return disk->integrity;
1055 }
1056 
1057 static inline int blk_integrity_rq(struct request *rq)
1058 {
1059 	if (rq->bio == NULL)
1060 		return 0;
1061 
1062 	return bio_integrity(rq->bio);
1063 }
1064 
1065 #else /* CONFIG_BLK_DEV_INTEGRITY */
1066 
1067 #define blk_integrity_rq(rq)			(0)
1068 #define blk_rq_count_integrity_sg(a)		(0)
1069 #define blk_rq_map_integrity_sg(a, b)		(0)
1070 #define bdev_get_integrity(a)			(0)
1071 #define blk_get_integrity(a)			(0)
1072 #define blk_integrity_compare(a, b)		(0)
1073 #define blk_integrity_register(a, b)		(0)
1074 #define blk_integrity_unregister(a)		do { } while (0);
1075 
1076 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1077 
1078 struct block_device_operations {
1079 	int (*open) (struct block_device *, fmode_t);
1080 	int (*release) (struct gendisk *, fmode_t);
1081 	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1082 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1083 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1084 	int (*direct_access) (struct block_device *, sector_t,
1085 						void **, unsigned long *);
1086 	int (*media_changed) (struct gendisk *);
1087 	int (*revalidate_disk) (struct gendisk *);
1088 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1089 	struct module *owner;
1090 };
1091 
1092 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1093 				 unsigned long);
1094 #else /* CONFIG_BLOCK */
1095 /*
1096  * stubs for when the block layer is configured out
1097  */
1098 #define buffer_heads_over_limit 0
1099 
1100 static inline long nr_blockdev_pages(void)
1101 {
1102 	return 0;
1103 }
1104 
1105 #endif /* CONFIG_BLOCK */
1106 
1107 #endif
1108