#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/bsg.h>

#include <asm/scatterlist.h>

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif
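
/*
 * Usage sketch (illustrative only, not part of this header): sector_div()
 * divides a sector_t in place and returns the remainder, which keeps it
 * cheap on 32-bit machines where a plain '%' on a 64-bit sector count
 * would pull in libgcc helpers.
 *
 *	sector_t nr = get_capacity(disk);	// total sectors, for example
 *	u32 rem = sector_div(nr, 8);		// nr is now nr / 8
 *	if (rem)
 *		...				// capacity not 8-sector aligned
 */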

#ifdef CONFIG_BLOCK

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;
	void *key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */
};

/*
 * This is the per-process I/O subsystem state.  It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process I/O context
 * (apart from the atomic refcount), so they require no locking.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;

	unsigned int ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct rb_root cic_root;
	void *ioc_data;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);

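/*
 * Reference-counting sketch (illustrative only): get_io_context() takes a
 * reference on the current task's context, creating it on first use, and
 * every reference must be balanced with put_io_context().
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_io_context(GFP_KERNEL, -1);	// -1: any NUMA node
 *	if (ioc) {
 *		...				// inspect ioc->aic etc.
 *		put_io_context(ioc);		// drop the reference
 *	}
 */
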
struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_CMD,
	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For a request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
};
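
/*
 * Dispatch sketch (illustrative only): a driver receiving a
 * REQ_TYPE_LINUX_BLOCK request switches on rq->cmd[0] to find the opcode,
 * much as it would inspect the CDB of a REQ_TYPE_BLOCK_PC request:
 *
 *	if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK) {
 *		switch (rq->cmd[0]) {
 *		case REQ_LB_OP_EJECT:
 *			...		// eject the medium
 *			break;
 *		case REQ_LB_OP_FLUSH:
 *			...		// flush the write cache
 *			break;
 *		}
 *	}
 */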

/*
 * request type modifier bits. The first three bits match the BIO_RW* bits,
 * which is important.
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	request_queue_t *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Two pointers are available for the IO schedulers; if they need
	 * more they have to allocate it dynamically.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};
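
/*
 * Consumption sketch (illustrative only): a simple ram-disk style
 * ->request_fn reads the driver-visible fields; the hard_* counterparts are
 * block layer internals and must be left alone. mydev_request() is a
 * hypothetical driver function.
 *
 *	static void mydev_request(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				end_request(rq, 0);	// refuse non-fs requests
 *				continue;
 *			}
 *			// rq->sector: first sector of the current segment,
 *			// rq->current_nr_sectors: its length in sectors,
 *			// rq->buffer: its kernel-virtual mapping
 *			...
 *			end_request(rq, 1);
 *		}
 *	}
 */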

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (softirq_done_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	issue_flush_fn		*issue_flush_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;
	unsigned int		bi_size;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
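
/*
 * Setup sketch (illustrative only): a driver whose device has a volatile
 * write cache but honours FUA could advertise barrier support with
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FUA, mydev_prepare_flush);
 *
 * where mydev_prepare_flush() is a hypothetical prepare_flush_fn that turns
 * the pre-flush request into a cache-flush command. A device with no write
 * cache would pass QUEUE_ORDERED_DRAIN and a NULL prepare_flush_fn instead.
 */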

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is either a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bits set, nor
 * may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
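
/*
 * prep_rq_fn sketch (illustrative only): prep is the driver's chance to
 * build its command before a request reaches ->request_fn. Both
 * mydev_prep_fn() and mydev_resources_available() are hypothetical.
 *
 *	static int mydev_prep_fn(request_queue_t *q, struct request *rq)
 *	{
 *		if (!mydev_resources_available(q))
 *			return BLKPREP_DEFER;	// leave queued, retry later
 *		if (!blk_fs_request(rq))
 *			return BLKPREP_KILL;	// cannot serve it, fail it
 *		...				// build the device command
 *		return BLKPREP_OK;
 *	}
 *
 * and is installed with blk_queue_prep_rq(q, mydev_prep_fn).
 */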

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
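
/*
 * Iteration sketch (illustrative only): walking every bio still attached
 * to a request, in submission order:
 *
 *	struct bio *bio;
 *
 *	rq_for_each_bio(bio, rq)
 *		...	// inspect each bio
 *
 * The leading NULL check in the macro makes it safe on requests that carry
 * no data.
 */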

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(request_queue_t *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_start_queueing(request_queue_t *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_fill_sghdr_rq(request_queue_t *, struct request *,
		      struct sg_io_hdr *, int);
extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *);
extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *,
			  struct bio *);
extern int blk_verify_command(unsigned char *, int);

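/*
 * Pass-through sketch (illustrative only): issuing a SCSI TEST UNIT READY
 * through the block layer, roughly the path scsi_cmd_ioctl() takes:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = 0x00;			// TEST UNIT READY
 *	rq->cmd_len = 6;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	if (blk_execute_rq(q, disk, rq, 0))	// waits for completion
 *		...				// rq->errors holds the result
 *	blk_put_request(rq);
 */
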
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);

/*
 * end_that_request_first/chunk() take an uptodate argument. We account
 * any value <= 0 as an I/O error. 0 means -EIO for compatibility reasons,
 * any other value < 0 is the direct error type. An uptodate value of
 * 1 indicates successful I/O completion.
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))
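
/*
 * Completion sketch (illustrative only): a driver completing 'nsect'
 * sectors from its interrupt handler, with the queue lock held:
 *
 *	if (!end_that_request_first(rq, 1, nsect)) {
 *		blkdev_dequeue_request(rq);	// no sectors left
 *		end_that_request_last(rq, 1);
 *	}
 *
 * end_request(rq, uptodate) wraps this pattern for the common case of
 * completing rq->hard_cur_sectors at a time.
 */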

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern int blk_do_ordered(request_queue_t *, struct request **);
extern unsigned blk_ordered_cur_seq(request_queue_t *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);

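/*
 * Initialisation sketch (illustrative only): a typical driver sets up its
 * queue and limits once at probe time. mydev_request and mydev->lock are
 * hypothetical.
 *
 *	q = blk_init_queue(mydev_request, &mydev->lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 *	blk_queue_max_phys_segments(q, 16);
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *	disk->queue = q;	// then add_disk(disk)
 */
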
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(gfp_t);
request_queue_t *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(request_queue_t *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
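
/*
 * Tagging sketch (illustrative only): with tagging enabled via
 * blk_queue_init_tags(q, depth, NULL), a driver tags each request on
 * dispatch and releases the tag on completion:
 *
 *	if (blk_queue_start_tag(q, rq))
 *		...			// out of tags: requeue or stop queue
 *	else
 *		...			// rq->tag now identifies the command
 *
 *	// later, under the queue lock, once the hardware is done:
 *	blk_queue_end_tag(q, rq);
 */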

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
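
/*
 * Worked example: blksize_bits(512) == 9, blksize_bits(1024) == 10,
 * blksize_bits(4096) == 12. The loop simply counts, starting from 2^8, how
 * many times the size can be halved before it reaches 256.
 */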

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}
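
/*
 * Usage sketch (illustrative only), as the partition-parsing code does it:
 *
 *	Sector sect;
 *	unsigned char *data = read_dev_sector(bdev, n, &sect);
 *
 *	if (data) {
 *		...			// parse the sector at 'data'
 *		put_dev_sector(sect);	// drop the page reference
 *	}
 */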

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */