#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/config.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>

#include <asm/scatterlist.h>

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO history tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};
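
/*
 * The means are derived from the running totals above; roughly (a sketch
 * only, the real update in as-iosched.c uses decayed fixed-point sums and
 * guards against zero samples):
 *
 *	aic->ttime_mean = aic->ttime_total / aic->ttime_samples;
 *	aic->seek_mean  = aic->seek_total  / aic->seek_samples;
 */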

struct cfq_queue;
struct cfq_io_context {
	/*
	 * circular list of cfq_io_contexts belonging to a process io context
	 */
	struct list_head list;
	struct cfq_queue *cfqq;
	void *key;

	struct io_context *ioc;

	unsigned long last_end_request;
	unsigned long last_queue;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	void (*dtor)(struct cfq_io_context *);
	void (*exit)(struct cfq_io_context *);
};

/*
 * This is the per-process I/O subsystem state.  It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process io context
 * (apart from the atomic refcount), so no locking is required.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;

	int (*set_ioprio)(struct io_context *, unsigned int);

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct cfq_io_context *cic;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *current_io_context(gfp_t gfp_flags);
struct io_context *get_io_context(gfp_t gfp_flags);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
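
/*
 * Reference handling in outline (a minimal sketch; see the callers in
 * ll_rw_blk.c): get_io_context() returns the current task's context with
 * its refcount elevated, while current_io_context() takes no reference
 * and may only be used by the owning task.
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO);
 *	if (ioc) {
 *		...
 *		put_io_context(ioc);	// drop the reference we took
 *	}
 */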

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	unsigned long flags;		/* see REQ_ bits below */

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	sector_t hard_sector;		/* next sector to complete */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	void *elevator_private;
	void *completion_data;

	unsigned short ioprio;

	int rq_status;	/* should split this into a few status bits */
	struct gendisk *rq_disk;
	int errors;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	int tag;
	char *buffer;

	int ref_count;
	request_queue_t *q;
	struct request_list *rl;

	struct completion *waiting;
	void *special;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	void *data;

	unsigned int sense_len;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * For Power Management requests
	 */
	struct request_pm_state *pm;

	/*
	 * completion callback. end_io_data should be folded in with waiting
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

/*
 * the first three bits match the BIO_RW* bits; this is important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set: read; set: write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_CMD,		/* is a regular fs rw request */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	/*
	 * for ATA/ATAPI devices
	 */
	__REQ_PC,		/* packet command (special) */
	__REQ_BLOCK_PC,		/* queued down pc from block layer */
	__REQ_SENSE,		/* sense retrieval */

	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_SPECIAL,		/* driver-supplied command */
	__REQ_DRIVE_CMD,
	__REQ_DRIVE_TASK,
	__REQ_DRIVE_TASKFILE,
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_PM_SUSPEND,	/* suspend request */
	__REQ_PM_RESUME,	/* resume request */
	__REQ_PM_SHUTDOWN,	/* shutdown request */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_CMD		(1 << __REQ_CMD)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_PC		(1 << __REQ_PC)
#define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
#define REQ_SENSE	(1 << __REQ_SENSE)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_SPECIAL	(1 << __REQ_SPECIAL)
#define REQ_DRIVE_CMD	(1 << __REQ_DRIVE_CMD)
#define REQ_DRIVE_TASK	(1 << __REQ_DRIVE_TASK)
#define REQ_DRIVE_TASKFILE	(1 << __REQ_DRIVE_TASKFILE)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
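
/*
 * Because __REQ_RW is bit 0 (matching BIO_RW), the data direction of a
 * request can be read straight off the low flag bit, which is exactly
 * what rq_data_dir() below relies on:
 *
 *	int dir = rq_data_dir(rq);	// READ (0) or WRITE (1)
 */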

/*
 * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef int (merge_request_fn) (request_queue_t *, struct request *,
				struct bio *);
typedef int (merge_requests_fn) (request_queue_t *, struct request *,
				 struct request *);
typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (softirq_done_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	merge_request_fn	*back_merge_fn;
	merge_request_fn	*front_merge_fn;
	merge_requests_fn	*merge_requests_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	activity_fn		*activity_fn;
	issue_flush_fn		*issue_flush_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	void			*activity_data;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	atomic_t		refcnt;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;

	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;
	unsigned int		bi_size;
};

#define RQ_INACTIVE		(-1)
#define RQ_ACTIVE		1
#define RQ_SCSI_BUSY		0xffff
#define RQ_SCSI_DONE		0xfffe
#define RQ_SCSI_DISCONNECTING	0xffe0

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
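
/*
 * A driver picks the strongest method its hardware supports at init time.
 * A minimal sketch (my_prepare_flush stands in for the driver's own
 * prepare_flush_fn, which fills in its cache-flush command):
 *
 *	// write-back cache, ordering by drain plus flushes:
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
 *
 *	// write-through cache, draining alone is enough:
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
 */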

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
#define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->flags & REQ_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->flags & REQ_PM_RESUME)
#define blk_pm_request(rq)	\
	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))

#define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->flags & 1)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}


/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bits set,
 * nor may the driver already have started it.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * noop, requests are automagically marked as active/inactive by I/O
 * scheduler -- see elv_next_request
 */
#define blk_queue_headactive(q, head_active)

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
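
/*
 * Example: walking every bio still attached to a request (a sketch only;
 * the bio list must not be modified while iterating):
 *
 *	struct bio *bio;
 *	unsigned int total = 0;
 *
 *	rq_for_each_bio(bio, rq)
 *		total += bio->bi_size;
 */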

struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
extern int blk_rq_unmap_user(struct bio *, unsigned int);
extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
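
/*
 * Putting the request API together: a minimal sketch of issuing a packet
 * command synchronously (error handling omitted; cdb, buf and the length
 * variables are placeholders):
 *
 *	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
 *
 *	rq->flags |= REQ_BLOCK_PC;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *	if (buf_len)
 *		blk_rq_map_kern(q, rq, buf, buf_len, GFP_NOIO);
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */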

static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);
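
/*
 * The canonical driver completion sequence, queue lock held (a sketch;
 * uptodate follows the convention documented below, and this is roughly
 * what end_request() itself does with nr_sectors = rq->hard_cur_sectors):
 *
 *	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
 *		blkdev_dequeue_request(rq);
 *		end_that_request_last(rq, uptodate);
 *	}
 *
 * end_that_request_first() returns non-zero while the request still has
 * sectors left to complete.
 */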

static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
{
	if (blk_fs_request(rq))
		return (nr_bytes >= (rq->hard_nr_sectors << 9));
	else if (blk_pc_request(rq))
		return nr_bytes >= rq->data_len;

	return 0;
}

/*
 * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
 * any other < 0 value is the direct error type. An uptodate value of
 * 1 indicates successful io completion.
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * This should be in elevator.h, but that requires pulling in rq and q
 */
static inline void elv_dispatch_add_tail(struct request_queue *q,
					 struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;
	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}

/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern int blk_do_ordered(request_queue_t *, struct request **);
extern unsigned blk_ordered_cur_seq(request_queue_t *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
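
/*
 * Typical driver initialization (a hedged sketch; my_request_fn and
 * my_lock are the driver's own request handler and spinlock, the values
 * are illustrative and the constants are defined further down):
 *
 *	request_queue_t *q = blk_init_queue(my_request_fn, &my_lock);
 *
 *	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 *	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 *	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */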

extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(gfp_t);
request_queue_t *blk_alloc_queue_node(gfp_t, int);
#define blk_put_queue(q) blk_cleanup_queue((q))

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
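
/*
 * Tagged queueing in outline (a sketch; real drivers also handle
 * allocation failure and tag exhaustion):
 *
 *	blk_queue_init_tags(q, depth, NULL);	// once, at init time
 *
 *	// in the request_fn, before handing rq to hardware:
 *	if (blk_queue_start_tag(q, rq))
 *		return;		// no free tag, try again later
 *	// rq->tag now holds the tag; the request was dequeued for us
 *
 *	// on completion, queue lock held:
 *	blk_queue_end_tag(q, rq);
 */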
extern long blk_congestion_wait(int rw, long timeout);

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

static inline int bdev_dma_aligment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
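
/*
 * blksize_bits() is log2 for the supported block sizes, e.g.
 * blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12. The "assumes size > 256" note holds because
 * the smallest supported block size is 512 bytes.
 */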

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif
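
/*
 * sector_div(n, b) divides n by b in place and evaluates to the
 * remainder; the CONFIG_LBD variant routes through do_div() so that a
 * 64-bit sector_t works on 32-bit builds. A usage sketch (the variable
 * names are placeholders):
 *
 *	sector_t nr = capacity;
 *	unsigned int rem = sector_div(nr, stripe_sectors);
 *	// nr is now the quotient, rem the remainder
 */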

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
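
/*
 * For example, MODULE_ALIAS_BLOCKDEV_MAJOR(8) expands to
 * MODULE_ALIAS("block-major-8-*"), so the module can be auto-loaded when
 * a device node with major number 8 is first opened.
 */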


#endif