/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;
struct blk_queue_stats;
struct blk_stat_callback;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS	16

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	3

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue *q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq *blkg;		/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.
   Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int internal_tag;

	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	int tag;
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count;	/* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
	struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	unsigned int timeout;

	void *special;		/* opaque pointer available for LLD use */

	unsigned int extra_len;	/* length of alignment and padding */

	unsigned short write_hint;

	unsigned long deadline;
	struct list_head timeout_list;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

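/*
 * Example (illustrative sketch, not part of this interface): passthrough
 * requests carry SCSI or driver-private commands rather than filesystem
 * data, so drivers typically skip bio-based accounting for them.  The
 * function names below are hypothetical.
 *
 *	static void mydrv_account_completion(struct request *rq)
 *	{
 *		if (blk_rq_is_passthrough(rq))
 *			return;
 *		mydrv_update_fs_stats(rq, blk_rq_bytes(rq));
 *	}
 */
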
#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
typedef void (exit_rq_fn)(struct request_queue *, struct request *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_write_same_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char raid_partial_stripes_expensive;
	enum blk_zoned_model zoned;
};

#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int nr_zones;
	u8 padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

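/*
 * Example (illustrative sketch): walking the first zones of a zoned block
 * device with blkdev_report_zones() and resetting one that is full.  Error
 * handling is minimal, the function name is hypothetical, and both calls are
 * only available when CONFIG_BLK_DEV_ZONED is enabled.
 *
 *	static int report_first_zones(struct block_device *bdev)
 *	{
 *		struct blk_zone zones[4];
 *		unsigned int nr = ARRAY_SIZE(zones);
 *		int ret;
 *
 *		ret = blkdev_report_zones(bdev, 0, zones, &nr, GFP_KERNEL);
 *		if (ret)
 *			return ret;
 *		if (nr && zones[0].cond == BLK_ZONE_COND_FULL)
 *			ret = blkdev_reset_zones(bdev, zones[0].start,
 *						 zones[0].len, GFP_KERNEL);
 *		return ret;
 *	}
 */
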
struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;
	int nr_rqs[2];		/* # allocated [a]sync rqs */
	int nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	atomic_t shared_hctx_restart;

	struct blk_queue_stats *stats;
	struct rq_wb *rq_wb;

	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl. Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list root_rl;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	poll_q_fn *poll_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;
	/* Called just after a request is allocated */
	init_rq_fn *init_rq_fn;
	/* Called just before a request is freed */
	exit_rq_fn *exit_rq_fn;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	const struct blk_mq_ops *mq_ops;

	unsigned int *mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;
	unsigned int nr_queues;

	unsigned int queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx **queue_hw_ctx;
	unsigned int nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work delay_work;

	struct backing_dev_info *backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device *dev;
	int rpm_status;
	unsigned int nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];

	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int request_fn_active;

	unsigned int rq_timeout;
	int poll_nsec;

	struct blk_stat_callback *poll_cb;
	struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list timeout;
	struct work_struct timeout_work;
	struct list_head timeout_list;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
#endif

	struct queue_limits limits;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
	struct mutex blk_trace_mutex;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;

	struct list_head requeue_list;
	spinlock_t requeue_lock;
	struct delayed_work requeue_work;

	struct mutex sysfs_lock;

	int bypass_depth;
	atomic_t mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	struct percpu_ref q_usage_counter;
	struct list_head all_q_node;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;
	struct bio_set *bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
	struct dentry *sched_debugfs_dir;
#endif

	bool mq_sysfs_init_done;

	size_t cmd_size;
	void *rq_alloc_data;

	struct work_struct release_work;

#define BLK_MAX_WRITE_HINTS	5
	u64 write_hints[BLK_MAX_WRITE_HINTS];
};

#define QUEUE_FLAG_QUEUED	0	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	1	/* queue is stopped */
#define QUEUE_FLAG_DYING	2	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	3	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		4	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	5	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	6	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	7	/* fake timeout */
#define QUEUE_FLAG_NONROT	9	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	10	/* do IO stats */
#define QUEUE_FLAG_DISCARD	11	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	12	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	13	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	14	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	15	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		16	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	17	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	18	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		19	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		20	/* Write back caching */
#define QUEUE_FLAG_FUA		21	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	22	/* flush not queueable */
#define QUEUE_FLAG_DAX		23	/* device supports DAX */
#define QUEUE_FLAG_STATS	24	/* track rq completion times */
#define QUEUE_FLAG_POLL_STATS	25	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	26	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	28	/* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY	29	/* only process REQ_PREEMPT requests */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_POLL))

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)

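/*
 * Example (illustrative sketch): queue flags are normally manipulated with
 * the queue_flag_*() helpers while holding ->queue_lock; the *_unlocked()
 * variants are reserved for contexts where the queue is not yet visible to
 * other threads.  The function name is hypothetical.
 *
 *	static void mydrv_mark_non_rotational(struct request_queue *q)
 *	{
 *		spin_lock_irq(q->queue_lock);
 *		queue_flag_set(QUEUE_FLAG_NONROT, q);
 *		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *		spin_unlock_irq(q->queue_lock);
 *	}
 */
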
#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q)				\
	test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)

extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

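/*
 * Example (illustrative sketch): iterating over every data segment of a
 * request with rq_for_each_segment(), e.g. in a simple memory-backed driver.
 * The helper name is hypothetical and the sketch assumes pages that are
 * always mapped (no highmem handling).
 *
 *	static void mydrv_copy_request(struct request *rq, void *dst)
 *	{
 *		struct req_iterator iter;
 *		struct bio_vec bvec;
 *
 *		rq_for_each_segment(bvec, rq, iter) {
 *			void *src = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *			memcpy(dst, src, bvec.bv_len);
 *			dst += bvec.bv_len;
 *		}
 *	}
 */
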
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request_flags(struct request_queue *,
					     unsigned int op,
					     blk_mq_req_flags_t flags);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
				       gfp_t gfp_mask);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
					      struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern void blk_execute_rq(struct request_queue *, struct gendisk *,
			   struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

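/*
 * Example (illustrative sketch): allocating a driver-private passthrough
 * request, attaching a kernel buffer to it and executing it synchronously.
 * The names are hypothetical; a real driver would interpret REQ_OP_DRV_OUT
 * in its dispatch path and fill in the result before completion.
 *
 *	static int mydrv_send_command(struct request_queue *q, void *buf,
 *				      unsigned int len)
 *	{
 *		struct request *rq;
 *		int ret;
 *
 *		rq = blk_get_request(q, REQ_OP_DRV_OUT, GFP_KERNEL);
 *		if (IS_ERR(rq))
 *			return PTR_ERR(rq);
 *
 *		ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *		if (!ret)
 *			blk_execute_rq(q, NULL, rq, 0);
 *
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */
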
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

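/*
 * Example (illustrative sketch): a legacy request_fn style driver using the
 * accessors above to find out where a request starts and how much data it
 * carries.  The driver function and hardware-submit helper are hypothetical.
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			sector_t pos = blk_rq_pos(rq);
 *			unsigned int sectors = blk_rq_sectors(rq);
 *
 *			mydrv_submit_to_hw(rq, pos, sectors,
 *					   rq_data_dir(rq) == WRITE);
 *		}
 *	}
 */
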
static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, blk_status_t error);
extern bool blk_end_request(struct request *rq, blk_status_t error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request(struct request *rq, blk_status_t error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
						 spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
					   unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
					  unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
					     unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
					       unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

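/*
 * Example (illustrative sketch): typical use of the limit setters above from
 * a driver's probe path, after the queue has been allocated.  The function
 * name and the limit values are made up for illustration.
 *
 *	static void mydrv_set_limits(struct request_queue *q)
 *	{
 *		blk_queue_logical_block_size(q, 512);
 *		blk_queue_physical_block_size(q, 4096);
 *		blk_queue_max_hw_sectors(q, 2048);	// 1 MiB per command
 *		blk_queue_max_segments(q, 128);
 *		blk_queue_max_segment_size(q, 65536);
 *		blk_queue_write_cache(q, true, true);	// volatile cache + FUA
 *	}
 */
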
/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

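/*
 * Example (illustrative sketch): a driver that must transfer a small
 * driver-built payload for an otherwise data-less command (e.g. a DISCARD
 * translated into a range descriptor) can attach it through ->special_vec,
 * after which blk_rq_nr_phys_segments() and blk_rq_payload_bytes() describe
 * that payload.  The function name is hypothetical.
 *
 *	static void mydrv_setup_discard(struct request *rq, struct page *page,
 *					unsigned int len)
 *	{
 *		rq->special_vec.bv_page = page;
 *		rq->special_vec.bv_offset = 0;
 *		rq->special_vec.bv_len = len;
 *		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
 *	}
 */
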
/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

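/*
 * Example (illustrative sketch): batching a series of bio submissions under
 * a plug so that they can be merged and dispatched together.  The bio
 * producer is hypothetical.
 *
 *	static void mydrv_submit_batch(struct bio **bios, int nr)
 *	{
 *		struct blk_plug plug;
 *		int i;
 *
 *		blk_start_plug(&plug);
 *		for (i = 0; i < nr; i++)
 *			submit_bio(bios[i]);
 *		blk_finish_plug(&plug);
 *	}
 */
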
/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, 0);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t mode);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

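/*
 * Worked example (illustrative numbers): with physical_block_size = 4096,
 * io_min = 4096 and alignment_offset = 3584, a partition starting at
 * sector 7 gives
 *
 *	granularity = 4096 bytes = 8 sectors
 *	alignment   = (7 % 8) << 9 = 3584 bytes
 *	offset      = (4096 + 3584 - 3584) % 4096 = 0
 *
 * i.e. that partition is aligned to the underlying physical blocks despite
 * the device-level alignment_offset.
 */
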
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zone_sectors(q);

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

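/*
 * Example (illustrative sketch): checking whether a kernel buffer satisfies
 * the queue's DMA alignment and padding requirements before mapping it, and
 * falling back to a bounce copy otherwise.  Both helper names are
 * hypothetical.
 *
 *	static int mydrv_map_buffer(struct request_queue *q, struct request *rq,
 *				    void *buf, unsigned int len)
 *	{
 *		if (!blk_rq_aligned(q, (unsigned long)buf, len))
 *			return mydrv_map_bounced(q, rq, buf, len);
 *
 *		return blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	}
 */
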
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

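/*
 * Worked example: blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12, i.e. it returns log2() of a block size that is
 * assumed to be a power of two larger than 256.
 */
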
static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	put_page(p.v);
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
				      struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				    struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

/*
 * Check if the two bvecs from two bios can be merged to one segment.
 * If yes, no need to check gap between the two bios since the 1st bio
 * and the 1st bvec in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
		return false;
	if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;
	return true;
}

static inline bool bio_will_gap(struct request_queue *q,
				struct request *prev_rq,
				struct bio *prev,
				struct bio *next)
{
	if (bio_has_data(prev) && queue_virt_boundary(q)) {
		struct bio_vec pb, nb;

		/*
		 * don't merge if the 1st bio starts with non-zero
		 * offset, otherwise it is quite difficult to respect
		 * sg gap limit. We work hard to merge a huge number of small
		 * single bios in case of mkfs.
		 */
		if (prev_rq)
			bio_get_first_bvec(prev_rq->bio, &pb);
		else
			bio_get_first_bvec(prev, &pb);
		if (pb.bv_offset)
			return true;

		/*
		 * We don't need to worry about the situation that the
		 * merged segment ends in unaligned virt boundary:
		 *
		 * - if 'pb' ends aligned, the merged segment ends aligned
		 * - if 'pb' ends unaligned, the next bio must include
		 *   one single bvec of 'nb', otherwise the 'nb' can't
		 *   merge with 'pb'
		 */
		bio_get_last_bvec(prev, &pb);
		bio_get_first_bvec(next, &nb);

		if (!bios_segs_mergeable(q, prev, &pb, &nb))
			return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
	}

	return false;
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

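/*
 * Example (illustrative sketch): a block driver that owns major number 240
 * (a made-up value from the local/experimental range) would advertise it for
 * automatic module loading with
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(240);
 *
 * which expands to MODULE_ALIAS("block-major-240-*").
 */
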
#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn		*generate_fn;
	integrity_processing_fn		*verify_fn;
	const char			*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
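/*
 * Illustrative sketch only: how a disk driver might describe a protection
 * information profile and hand it to blk_integrity_register().  The callback
 * bodies, the profile name, and the struct blk_integrity field values below
 * are assumptions for the example, not taken from this header.
 */
#if 0	/* example, never compiled */
static blk_status_t example_integrity_generate(struct blk_integrity_iter *iter)
{
	/* compute protection data for iter->data_buf into iter->prot_buf */
	return BLK_STS_OK;
}

static blk_status_t example_integrity_verify(struct blk_integrity_iter *iter)
{
	/* check iter->prot_buf against iter->data_buf, return an error on mismatch */
	return BLK_STS_OK;
}

static const struct blk_integrity_profile example_integrity_profile = {
	.name		= "example-integrity",
	.generate_fn	= example_integrity_generate,
	.verify_fn	= example_integrity_verify,
};

static void example_register_integrity(struct gendisk *disk)
{
	struct blk_integrity bi = {
		.profile	= &example_integrity_profile,
		.tuple_size	= 8,	/* bytes of protection info per interval */
		.interval_exp	= 9,	/* one tuple per 512-byte interval */
	};

	blk_integrity_register(disk, &bi);
}
#endif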
struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif
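/*
 * Illustrative sketch only (kept after the header guard and compiled out):
 * the minimal shape of a driver's block_device_operations table.  The
 * callback names and geometry values are hypothetical; a real driver would
 * also need <linux/module.h> for THIS_MODULE and would point gendisk->fops
 * at the table before add_disk().
 */
#if 0	/* example, never compiled */
static int example_open(struct block_device *bdev, fmode_t mode)
{
	return 0;	/* nothing to do for this example */
}

static void example_release(struct gendisk *disk, fmode_t mode)
{
}

static int example_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* fake a small CHS geometry so partitioning tools are happy */
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	return 0;
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.release	= example_release,
	.getgeo		= example_getgeo,
};
#endif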