#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		unsigned long fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	u64 cmd_flags;
	unsigned cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

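/*
 * Example (illustrative only, not part of the API declared here): a
 * driver-private completion callback matching rq_end_io_fn, of the kind a
 * driver might pass to blk_execute_rq_nowait().  The names "my_cmd" and
 * "my_cmd_done" are hypothetical, and <linux/completion.h> is assumed for
 * struct completion.
 *
 *	struct my_cmd {
 *		struct completion done;
 *		int result;
 *	};
 *
 *	static void my_cmd_done(struct request *rq, int error)
 *	{
 *		struct my_cmd *cmd = rq->end_io_data;
 *
 *		cmd->result = error;
 *		complete(&cmd->done);
 *	}
 */
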
#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

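/*
 * Example (illustrative, hypothetical driver code): a timeout handler
 * matching rq_timed_out_fn.  Returning BLK_EH_RESET_TIMER rearms the
 * request timer, BLK_EH_NOT_HANDLED lets the block layer's normal abort
 * path run, and BLK_EH_HANDLED tells it the driver already completed the
 * request.  "my_dev" and "my_dev_is_recovering()" are made-up names.
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		struct my_dev *dev = rq->q->queuedata;
 *
 *		if (my_dev_is_recovering(dev))
 *			return BLK_EH_RESET_TIMER;	// give it more time
 *
 *		return BLK_EH_NOT_HANDLED;		// let blk abort it
 *	}
 *
 * A driver would install this with blk_queue_rq_timed_out(q, my_timed_out).
 */
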
struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

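/*
 * Example (illustrative): the queue_flag_*() helpers above expect the queue
 * lock to be held unless the *_unlocked variant is used.  A driver that
 * knows its device is an SSD might mark the queue non-rotational roughly
 * like this ("q" is the driver's request_queue):
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * During initialisation, before the queue is visible to anyone else, the
 * unlocked variant is commonly used instead:
 *
 *	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 */
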
#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

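/*
 * Example (illustrative, hypothetical driver code): a prep_rq_fn installed
 * with blk_queue_prep_rq().  It runs before a request is handed to the
 * driver and must return one of the BLKPREP_* values above.  "my_dev",
 * "my_dev_resources_available()" and "my_dev_prepare_cmd()" are made-up
 * names.
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		struct my_dev *dev = q->queuedata;
 *
 *		if (!my_dev_resources_available(dev))
 *			return BLKPREP_DEFER;	// retried later, stays queued
 *
 *		rq->special = my_dev_prepare_cmd(dev, rq);
 *		if (!rq->special)
 *			return BLKPREP_KILL;	// fail the request
 *
 *		return BLKPREP_OK;
 *	}
 */
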
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

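/*
 * Example (illustrative): a driver walking the data of a request one
 * bio_vec at a time with rq_for_each_segment(), e.g. for a simple
 * memory-backed device.  "dev" and "dev_copy_from_buf()" are made-up names,
 * and highmem pages would need kmap_atomic() rather than page_address().
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	sector_t sector = blk_rq_pos(rq);
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *		dev_copy_from_buf(dev, sector, buf, bvec.bv_len);
 *		sector += bvec.bv_len >> 9;
 *	}
 */
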
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
		return blk_queue_get_max_sectors(q, rq->cmd_flags);

	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
			blk_queue_get_max_sectors(q, rq->cmd_flags));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

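/*
 * Example (illustrative, hypothetical driver code): issuing a synchronous
 * driver-private command through the request layer with blk_get_request(),
 * blk_rq_map_kern() and blk_execute_rq().  "q", "buf" and "buf_len" are
 * whatever the caller has; error handling is abbreviated.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (!ret)
 *		ret = blk_execute_rq(q, NULL, rq, 0);
 *
 *	blk_put_request(rq);
 *	return ret;
 */
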
/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

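/*
 * Example (illustrative, hypothetical driver code): the classic shape of a
 * single-queue request_fn.  It is entered with q->queue_lock held, pulls
 * started requests off the queue with blk_fetch_request() and completes
 * them with __blk_end_request_all() (the __ variant because the lock is
 * already held).  "my_handle_rq" is a made-up helper; a real driver would
 * usually drop the lock around the actual I/O.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int err;
 *
 *			if (rq->cmd_type != REQ_TYPE_FS)
 *				err = -EIO;
 *			else
 *				err = my_handle_rq(q->queuedata, rq);
 *
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 */
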
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

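/*
 * Example (illustrative, hypothetical driver probe code): creating a
 * single-queue request_queue and describing the device's limits with the
 * accessors above.  "my_request_fn", "my_lock", "dev", "disk" and the
 * particular limit values are made up for the sketch.
 *
 *	struct request_queue *q;
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *
 *	q->queuedata = dev;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 1024);
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *
 *	disk->queue = q;	// gendisk set up elsewhere
 *
 * The queue is torn down again with blk_cleanup_queue(q).
 */
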
/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list; /* requests */
	struct list_head mq_list; /* blk-mq requests */
	struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

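/*
 * Example (illustrative): a submitter batching several bios under a plug so
 * that the block layer can merge them and hand them to the driver in one go
 * when the plug is finished (or when the task sleeps).  submit_bio() comes
 * from <linux/bio.h>; "bios" and "nr_bios" are placeholders for whatever
 * the caller has prepared.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);
 */
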
/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}

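/*
 * Example (illustrative, filesystem-side usage): discarding a freed extent
 * and zeroing a range.  sb_issue_discard() converts filesystem blocks to
 * 512-byte sectors with sb->s_blocksize_bits before calling
 * blkdev_issue_discard().  "start_blk" and "nr_blks" are placeholders.
 *
 *	if (blk_queue_discard(bdev_get_queue(sb->s_bdev)))
 *		err = sb_issue_discard(sb, start_blk, nr_blks, GFP_NOFS, 0);
 *
 *	err = sb_issue_zeroout(sb, start_blk, nr_blks, GFP_NOFS);
 *
 * Passing BLKDEV_DISCARD_SECURE in the flags requests a secure discard,
 * which only makes sense on queues where blk_queue_secdiscard() is true.
 */
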
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

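/*
 * For example, blksize_bits(512) == 9 and blksize_bits(4096) == 12, so a
 * block number converts to a 512-byte sector roughly as
 *
 *	sector_t sect = blk_no << (blksize_bits(blocksize) - 9);
 *
 * which is the same shift the sb_issue_discard()/sb_issue_zeroout()
 * helpers above perform with sb->s_blocksize_bits.
 */
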
static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
				struct bio *next)
{
	if (!bio_has_data(prev))
		return false;

	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
				next->bi_io_vec[0].bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

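/*
 * Example: a driver that owns a whole block major, say SCSI disks on major
 * 8 (SCSI_DISK0_MAJOR from <linux/major.h>), would declare
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
 *
 * which expands to MODULE_ALIAS("block-major-8-*") and lets udev/modprobe
 * load the module when a device node with that major is first opened.
 */
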
#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn		*generate_fn;
	integrity_processing_fn		*verify_fn;
	const char			*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
			unsigned long *pfn);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

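/*
 * Example (illustrative, hypothetical driver code): a minimal
 * block_device_operations table.  Only the callbacks a driver actually
 * implements need to be filled in; "my_open", "my_release", "my_ioctl" and
 * "my_getgeo" are made-up names.
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.getgeo		= my_getgeo,
 *	};
 *
 * The table is hooked up through the gendisk (disk->fops = &my_fops)
 * before add_disk() is called.
 */
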
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
						struct writeback_control *);
extern long bdev_direct_access(struct block_device *, sector_t,
		void __pmem **addr, unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif