#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		unsigned long fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	u64 cmd_flags;
	unsigned cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list.  The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler; requests
	 * are pruned when moved to the dispatch queue.  So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler, so let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
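
/*
 * Example (illustrative sketch, not kernel API): the end_io/end_io_data
 * pair carries a completion callback for requests issued with
 * blk_execute_rq_nowait().  A hypothetical driver context "my_ctx" (made
 * up for this example) might be completed like so:
 *
 *	static void my_end_io(struct request *rq, int error)
 *	{
 *		struct my_ctx *ctx = rq->end_io_data;
 *
 *		ctx->status = error;
 *		complete(&ctx->done);
 *		__blk_put_request(rq->q, rq);
 *	}
 */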

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
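
/*
 * Example (sketch): a hypothetical rq_timed_out_fn.  Returning
 * BLK_EH_RESET_TIMER rearms the timer for another rq_timeout interval,
 * while BLK_EH_NOT_HANDLED hands the request to the normal error path:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		if (my_hw_still_working(rq->q->queuedata))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 *
 * (my_hw_still_working() is a made-up helper.)
 */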

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};
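
/*
 * Worked example (typical values, not mandated by the interface): a
 * "512e" drive with 4K physical sectors behind a 512-byte logical
 * interface would report logical_block_size = 512,
 * physical_block_size = 4096 and io_min = 4096; a partition starting
 * at LBA 63 sits 3584 bytes past a physical boundary, which is what
 * alignment_offset and misaligned describe.
 */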

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		mq_kobj;

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait.  Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	mq_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
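
/*
 * Example (sketch): the locked variants above must be called with
 * queue_lock held.  A hypothetical caller:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants are only safe while the queue is not yet
 * visible to other contexts, e.g. during allocation.
 */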

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}
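
/*
 * Example: rw_is_sync(READ) and rw_is_sync(WRITE | REQ_SYNC) are both
 * true; a plain WRITE without REQ_SYNC is the only asynchronous case.
 */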

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
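
/*
 * Example (sketch): a hypothetical driver walking every data segment
 * of a request with the iterator above:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_map_segment(page_address(bvec.bv_page) + bvec.bv_offset,
 *			       bvec.bv_len);
 *
 * (my_map_segment() is a made-up helper.)
 */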

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *,
			   unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
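
/*
 * Example (sketch): issuing a passthrough command synchronously, along
 * the lines a hypothetical caller might use:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	blk_rq_set_block_pc(rq);
 *	rq->cmd_len = ...;		(fill in CDB, map data, set timeout)
 *	blk_execute_rq(q, NULL, rq, 0);
 *	err = rq->errors ? -EIO : 0;
 *	blk_put_request(rq);
 */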

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}
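
/*
 * Worked example: with chunk_sectors = 128 (64K chunks), an offset of
 * 100 sectors into the device gives 128 - (100 & 127) = 28, i.e. a
 * request starting there may not extend past the next chunk boundary.
 */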

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
		return blk_queue_get_max_sectors(q, rq->cmd_flags);

	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
			blk_queue_get_max_sectors(q, rq->cmd_flags));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);
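
/*
 * Example (sketch): piecemeal completion from a hypothetical interrupt
 * handler.  blk_end_request() returns true while bytes remain pending,
 * false once the request is fully completed:
 *
 *	if (!blk_end_request(rq, error, bytes_done))
 *		my_start_next_request(q);
 *
 * (my_start_next_request() is a made-up helper.)
 */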

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
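
/*
 * Example (sketch): typical queue configuration in a hypothetical
 * driver's probe routine:
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 1024);
 *	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 */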

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself.  For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}
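
/*
 * Example (sketch): batching a burst of submissions under one plug:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(READ, bio[i]);
 *	blk_finish_plug(&plug);
 *
 * Requests queued between start and finish may be merged and are moved
 * to the request_queue in one batch.
 */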

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}
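
/*
 * Worked example: with a 4K filesystem block (s_blocksize_bits = 12)
 * the shift above is 3, so block 100 becomes sector 800 and nr_blocks
 * is likewise multiplied by 8 before being passed down.
 */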

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
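
/*
 * Worked example: with physical_block_size = io_min = 4096 and
 * alignment_offset = 3584, a partition starting at sector 63 is
 * 7 sectors (3584 bytes) past a 4K boundary, and
 * (4096 + 3584 - 3584) % 4096 == 0: the partition is well aligned.
 */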

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;

	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;

	do {
		bits++;
		size >>= 1;
	} while (size > 256);

	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}
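
/*
 * Example: blksize_bits(512) == 9 and blksize_bits(4096) == 12; the
 * helper simply computes log2 of the block size.
 */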

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list.  Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				    struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
				struct bio *next)
{
	if (!bio_has_data(prev))
		return false;

	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
				next->bi_io_vec[0].bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}
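
/*
 * Worked example: with queue_virt_boundary(q) = 0xfff (a 4K boundary),
 * a previous vec ending at intra-page offset 0x800 gaps against any
 * follower ((0x800 & 0xfff) != 0), while a follower that itself starts
 * at a non-zero offset always gaps.
 */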

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock().  A real patch is in progress
 * to fix this up; until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity {
	integrity_processing_fn	*generate_fn;
	integrity_processing_fn	*verify_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		interval;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return false;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
			      unsigned long *pfn);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
extern long bdev_direct_access(struct block_device *, sector_t,
			       void __pmem **addr, unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */