#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};
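
/*
 * Usage sketch: the sync/async indexing above pairs with BLK_RW_SYNC and
 * BLK_RW_ASYNC from <linux/backing-dev.h>.  Assuming q is the owning
 * request queue and rw_flags are REQ_* flags:
 *
 *	struct request_list *rl = &q->rq;
 *	int is_sync = rw_is_sync(rw_flags) ? BLK_RW_SYNC : BLK_RW_ASYNC;
 *
 *	rl->count[is_sync]++;
 *	if (rl->count[is_sync] >= q->nr_requests)
 *		blk_set_queue_full(q, is_sync);
 */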

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For a request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
};

/*
 * request type modifier bits. first four bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	/* above flags must match BIO_RW_* */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD	(1 << __REQ_DISCARD)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
#define REQ_IO_STAT	(1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE	(1 << __REQ_MIXED_MERGE)

#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
				 REQ_FAILFAST_DRIVER)

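/*
 * Usage sketch: drivers test these bits on rq->cmd_flags.  A hypothetical
 * error path that honors the failfast policy might do:
 *
 *	if (rq->cmd_flags & REQ_FAILFAST_MASK)
 *		blk_end_request_all(rq, -EIO);		(fail fast, no retry)
 *	else
 *		my_requeue_for_retry(rq);		(hypothetical helper)
 */
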
#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	sector_t __sector;		/* sector cursor */
	unsigned int __data_len;	/* total data len */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers; if they need
	 * more, they have to allocate them dynamically.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void	*data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;

	unsigned short		logical_block_size;
	unsigned short		max_hw_segments;
	unsigned short		max_phys_segments;

	unsigned char		misaligned;
	unsigned char		no_cluster;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_CLUSTER) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
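
/*
 * Calling convention sketch for the helpers above: the locked variants
 * assert that ->queue_lock is held, e.g.
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The *_unlocked variants skip that assertion and are only safe while the
 * queue is not yet visible to other contexts (e.g. during initialization).
 */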

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
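
/*
 * Capability sketch: a driver picks one of the composite QUEUE_ORDERED_*
 * values and registers it with blk_queue_ordered() (declared below).
 * Assuming a disk with a write-back cache that supports flushing:
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
 *
 * where my_prepare_flush is a hypothetical prepare_flush_fn that turns the
 * pre/post flush requests into device cache-flush commands.
 */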

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
				 blk_failfast_transport(rq) ||	\
				 blk_failfast_driver(rq))
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)

#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
 * may the driver already have started it.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request((rq))))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
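
/*
 * Usage sketch: a driver whose hardware can only DMA below the highmem
 * boundary would set
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *
 * while a fully 64-bit capable controller would pass BLK_BOUNCE_ANY and an
 * ISA device BLK_BOUNCE_ISA.
 */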

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
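
/*
 * Usage sketch for rq_for_each_segment(), assuming dst points at a driver
 * buffer large enough for blk_rq_bytes(rq):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *	char *dst = my_buffer;			(hypothetical buffer)
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *kaddr = kmap_atomic(bvec->bv_page, KM_USER0);
 *		memcpy(dst, kaddr + bvec->bv_offset, bvec->bv_len);
 *		kunmap_atomic(kaddr, KM_USER0);
 *		dst += bvec->bv_len;
 *	}
 */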

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_err_sectors()		: sectors left till the next error boundary
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_err_sectors(const struct request *rq)
{
	return blk_rq_err_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
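
/*
 * blk_fetch_request() is blk_peek_request() plus blk_start_request().  A
 * driver that may decline a request peeks first; sketch, with
 * my_hw_ready() a hypothetical readiness test:
 *
 *	rq = blk_peek_request(q);
 *	if (rq && my_hw_ready())
 *		blk_start_request(rq);
 */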

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);
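
/*
 * Dispatch-loop sketch tying the issue and completion helpers together
 * (request_fn is entered with ->queue_lock held, hence the __ variants);
 * my_transfer() is a hypothetical synchronous helper returning 0 or -errno:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				__blk_end_request_all(rq, -EIO);
 *				continue;
 *			}
 *			__blk_end_request_all(rq, my_transfer(rq));
 *		}
 *	}
 */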

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
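
/*
 * Tagged-queueing sketch: blk_queue_start_tag() assigns rq->tag and
 * returns 0 on success; on completion the tag is resolved back to the
 * request.  issue_to_hw() is a hypothetical hardware-submit helper:
 *
 *	if (blk_queue_start_tag(q, rq) == 0)
 *		issue_to_hw(rq->tag, rq);
 *	...
 *	rq = blk_queue_find_tag(q, completed_tag);
 *	if (rq)
 *		blk_queue_end_tag(q, rq);
 */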

extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define DISCARD_FL_WAIT		0x01	/* wait for completion */
#define DISCARD_FL_BARRIER	0x02	/* issue DISCARD_BARRIER request */
extern int blkdev_issue_discard(struct block_device *, sector_t sector,
		sector_t nr_sects, gfp_t, int flags);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
				    DISCARD_FL_BARRIER);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_hw_segments(struct request_queue *q)
{
	return q->limits.max_hw_segments;
}

static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
	return q->limits.max_phys_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_sector_alignment_offset(struct request_queue *q,
						sector_t sector)
{
	return ((sector << 9) - q->limits.alignment_offset)
		& (q->limits.io_min - 1);
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}
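
/*
 * Example: assuming a queue with 512-byte DMA alignment and no pad mask, a
 * buffer whose address or length is not 512-byte aligned fails the test,
 * and callers such as blk_rq_map_kern() fall back to a bounced copy:
 *
 *	if (!blk_rq_aligned(q, buf, len))
 *		use_bounce_buffer = 1;		(hypothetical flag)
 */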

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
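
/*
 * Example: blksize_bits(4096) shifts 4096 -> 2048 -> 1024 -> 512 -> 256,
 * incrementing bits from 8 four times, and returns 12 (1 << 12 == 4096).
 */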

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct { struct page *v; } Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
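
/*
 * Usage sketch: a driver that owns, say, block major 42 would add
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(42);
 *
 * so that opening a block-major-42-* device node can autoload the module.
 */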

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	unsigned long long (*set_capacity) (struct gendisk *,
						unsigned long long);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	struct module *owner;
};
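
/*
 * Minimal sketch of a driver's operations table; .owner is the one field
 * every driver sets, the rest are optional.  my_open/my_release/my_getgeo
 * are hypothetical:
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.getgeo		= my_getgeo,
 *	};
 *
 * installed with disk->fops = &my_fops before add_disk().
 */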

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif