#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	u64 cmd_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= flags;		\
} while (0)
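
/*
 * Illustrative sketch, not part of this header: the operation lives in the
 * top REQ_OP_BITS of cmd_flags, so it must be read with req_op() and
 * written with req_set_op()/req_set_op_attrs() rather than by masking
 * cmd_flags by hand. Preparing a sync write might look like:
 *
 *	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);
 *	WARN_ON(req_op(rq) != REQ_OP_WRITE);
 */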

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
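
/*
 * Illustrative sketch (an assumption about a typical bio-based driver,
 * not text from this header): a driver that bypasses the request layer
 * installs a make_request_fn with blk_queue_make_request() and consumes
 * bios itself:
 *
 *	static blk_qc_t my_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		blk_queue_split(q, &bio, q->bio_split);
 *		my_handle_bio(bio);	// hypothetical driver helper
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 *
 *	blk_queue_make_request(q, my_make_request);
 */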

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * Protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly; it is queue private. Always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO     10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   11	/* supports request stacking */
#define QUEUE_FLAG_NONROT      12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     13	/* do IO stats */
#define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC	       23	/* Write back caching */
#define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
#define QUEUE_FLAG_DAX         26	/* device supports DAX */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
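
/*
 * Illustrative sketch, not from this header: queue_flag_set() and
 * queue_flag_clear() use non-atomic bitops, so callers must hold
 * ->queue_lock (queue_lockdep_assert_held() checks this under lockdep).
 * Flipping a flag at runtime might look like:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The *_unlocked variants are only safe while the queue is not yet
 * visible to other contexts, e.g. during initialization.
 */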

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * A driver can handle struct request if it either has an old-style
 * request_fn defined or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(req_op(rq), rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};
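
/*
 * Illustrative sketch (an assumption, not from this header): a prep_rq_fn
 * installed with blk_queue_prep_rq() translates a request into a driver
 * command before dispatch and reports readiness with the codes above:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_resources_ready(q))	// hypothetical check
 *			return BLKPREP_DEFER;	// leave queued, retry later
 *		if (my_build_cmd(rq) < 0)	// hypothetical translation
 *			return BLKPREP_KILL;	// complete with -EIO
 *		return BLKPREP_OK;
 *	}
 */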

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
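
/*
 * Illustrative sketch, not from this header: rq_for_each_segment() visits
 * every bio_vec of every bio in a request, which lets simple drivers copy
 * data without building a scatterlist:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = kmap_atomic(bvec.bv_page);
 *		my_copy(buf + bvec.bv_offset, bvec.bv_len);	// hypothetical
 *		kunmap_atomic(buf);
 *	}
 */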

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
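
/*
 * Illustrative sketch (an assumption, not from this header): a passthrough
 * command is typically allocated with blk_get_request(), marked with
 * blk_rq_set_block_pc(), and issued asynchronously with an end_io callback:
 *
 *	static void my_end_io(struct request *rq, int error)
 *	{
 *		complete(rq->end_io_data);	// e.g. a struct completion
 *		blk_put_request(rq);
 *	}
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *	rq->end_io_data = &done;
 *	blk_execute_rq_nowait(q, NULL, rq, 0, my_end_io);
 */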

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
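
/*
 * Illustrative sketch, not from this header: drivers complete requests in
 * byte units through these accessors, e.g. one segment at a time:
 *
 *	while (blk_end_request(rq, 0, blk_rq_cur_bytes(rq)))
 *		;	// returns true while bytes remain
 *
 * blk_end_request_all(rq, 0) completes the whole request in one call.
 */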

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

896  * Return maximum size of a request at given offset. Only valid for
897  * file system requests.
898  */
899 static inline unsigned int blk_max_size_offset(struct request_queue *q,
900 					       sector_t offset)
901 {
902 	if (!q->limits.chunk_sectors)
903 		return q->limits.max_sectors;
904 
905 	return q->limits.chunk_sectors -
906 			(offset & (q->limits.chunk_sectors - 1));
907 }
908 
909 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
910 						  sector_t offset)
911 {
912 	struct request_queue *q = rq->q;
913 
914 	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
915 		return q->limits.max_hw_sectors;
916 
917 	if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
918 		return blk_queue_get_max_sectors(q, req_op(rq));
919 
920 	return min(blk_max_size_offset(q, offset),
921 			blk_queue_get_max_sectors(q, req_op(rq)));
922 }
923 
924 static inline unsigned int blk_rq_count_bios(struct request *rq)
925 {
926 	unsigned int nr_bios = 0;
927 	struct bio *bio;
928 
929 	__rq_for_each_bio(bio, rq)
930 		nr_bios++;
931 
932 	return nr_bios;
933 }
934 
935 /*
936  * Request issue related functions.
937  */
938 extern struct request *blk_peek_request(struct request_queue *q);
939 extern void blk_start_request(struct request *rq);
940 extern struct request *blk_fetch_request(struct request_queue *q);
941 
942 /*
943  * Request completion related functions.
944  *
945  * blk_update_request() completes given number of bytes and updates
946  * the request without completing it.
947  *
948  * blk_end_request() and friends.  __blk_end_request() must be called
949  * with the request queue spinlock acquired.
950  *
951  * Several drivers define their own end_request and call
952  * blk_end_request() for parts of the original function.
953  * This prevents code duplication in drivers.
954  */
955 extern bool blk_update_request(struct request *rq, int error,
956 			       unsigned int nr_bytes);
957 extern void blk_finish_request(struct request *rq, int error);
958 extern bool blk_end_request(struct request *rq, int error,
959 			    unsigned int nr_bytes);
960 extern void blk_end_request_all(struct request *rq, int error);
961 extern bool blk_end_request_cur(struct request *rq, int error);
962 extern bool blk_end_request_err(struct request *rq, int error);
963 extern bool __blk_end_request(struct request *rq, int error,
964 			      unsigned int nr_bytes);
965 extern void __blk_end_request_all(struct request *rq, int error);
966 extern bool __blk_end_request_cur(struct request *rq, int error);
967 extern bool __blk_end_request_err(struct request *rq, int error);
968 
969 extern void blk_complete_request(struct request *);
970 extern void __blk_complete_request(struct request *);
971 extern void blk_abort_request(struct request *);
972 extern void blk_unprep_request(struct request *);
973 
974 /*
975  * Access functions for manipulating queue properties
976  */
977 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
978 					spinlock_t *lock, int node_id);
979 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
980 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
981 						      request_fn_proc *, spinlock_t *);
982 extern void blk_cleanup_queue(struct request_queue *);
983 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
984 extern void blk_queue_bounce_limit(struct request_queue *, u64);
985 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
986 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
987 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
988 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
989 extern void blk_queue_max_discard_sectors(struct request_queue *q,
990 		unsigned int max_discard_sectors);
991 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
992 		unsigned int max_write_same_sectors);
993 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
994 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
995 extern void blk_queue_alignment_offset(struct request_queue *q,
996 				       unsigned int alignment);
997 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
998 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
999 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1000 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1001 extern void blk_set_default_limits(struct queue_limits *lim);
1002 extern void blk_set_stacking_limits(struct queue_limits *lim);
1003 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1004 			    sector_t offset);
1005 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1006 			    sector_t offset);
1007 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1008 			      sector_t offset);
1009 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1010 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1011 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1012 extern int blk_queue_dma_drain(struct request_queue *q,
1013 			       dma_drain_needed_fn *dma_drain_needed,
1014 			       void *buf, unsigned int size);
1015 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1016 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1017 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1018 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1019 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1020 extern void blk_queue_dma_alignment(struct request_queue *, int);
1021 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1022 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1023 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1024 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1025 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1026 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1027 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
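
/*
 * Illustrative sketch (an assumption about a typical probe path, not from
 * this header): a driver describes its hardware with these setters right
 * after allocating the queue; my_request_fn and my_lock are hypothetical:
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 1024);
 *	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */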

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
	struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability because contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list; /* requests */
	struct list_head mq_list; /* blk-mq requests */
	struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
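
/*
 * Illustrative sketch, not from this header: submitters batch I/O by
 * bracketing submission with a plug; the list is flushed by
 * blk_finish_plug() or when the task sleeps:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */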

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}
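
/*
 * Illustrative sketch, not from this header: callers check the queue
 * capability before issuing a discard, with ranges in 512-byte sectors:
 *
 *	if (blk_queue_discard(bdev_get_queue(bdev))) {
 *		int err = blkdev_issue_discard(bdev, sector, nr_sects,
 *					       GFP_KERNEL, 0);
 *		if (err)
 *			pr_warn("discard failed: %d\n", err);
 *	}
 */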

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
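
/*
 * Worked example (illustrative): with physical_block_size == 4096,
 * io_min <= 4096 and alignment_offset == 0, granularity is 4096 bytes
 * (8 sectors). For sector == 20: alignment = (20 % 8) << 9 = 2048, and
 * the result is (4096 + 0 - 2048) % 4096 = 2048, i.e. the byte offset
 * from @sector to the next aligned boundary.
 */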

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
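
/*
 * Worked example (illustrative): blksize_bits(512) == 9,
 * blksize_bits(1024) == 10, blksize_bits(4096) == 12 -- the log2 of the
 * block size for the sizes block devices actually use.
 */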

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	put_page(p.v);
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
			 struct bio *next)
{
	if (bio_has_data(prev) && queue_virt_boundary(q)) {
		struct bio_vec pb, nb;

		bio_get_last_bvec(prev, &pb);
		bio_get_first_bvec(next, &nb);

		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
	}

	return false;
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up; until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn		*generate_fn;
	integrity_processing_fn		*verify_fn;
	const char			*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
	sector_t sector;
	void *addr;
	long size;
	pfn_t pfn;
};

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
			long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};
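
/*
 * Illustrative sketch (an assumption, not from this header): a minimal
 * driver fills in block_device_operations and points its gendisk at it;
 * the my_* callbacks are hypothetical:
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.getgeo		= my_getgeo,
 *	};
 *
 *	disk->fops = &my_fops;
 */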

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
						struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
extern bool bdev_dax_capable(struct block_device *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif