1 #ifndef _LINUX_BLKDEV_H 2 #define _LINUX_BLKDEV_H 3 4 #include <linux/sched.h> 5 6 #ifdef CONFIG_BLOCK 7 8 #include <linux/major.h> 9 #include <linux/genhd.h> 10 #include <linux/list.h> 11 #include <linux/llist.h> 12 #include <linux/timer.h> 13 #include <linux/workqueue.h> 14 #include <linux/pagemap.h> 15 #include <linux/backing-dev.h> 16 #include <linux/wait.h> 17 #include <linux/mempool.h> 18 #include <linux/bio.h> 19 #include <linux/stringify.h> 20 #include <linux/gfp.h> 21 #include <linux/bsg.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate.h> 24 #include <linux/percpu-refcount.h> 25 26 #include <asm/scatterlist.h> 27 28 struct module; 29 struct scsi_ioctl_command; 30 31 struct request_queue; 32 struct elevator_queue; 33 struct request_pm_state; 34 struct blk_trace; 35 struct request; 36 struct sg_io_hdr; 37 struct bsg_job; 38 struct blkcg_gq; 39 struct blk_flush_queue; 40 41 #define BLKDEV_MIN_RQ 4 42 #define BLKDEV_MAX_RQ 128 /* Default maximum */ 43 44 /* 45 * Maximum number of blkcg policies allowed to be registered concurrently. 46 * Defined here to simplify include dependency. 47 */ 48 #define BLKCG_MAX_POLS 2 49 50 struct request; 51 typedef void (rq_end_io_fn)(struct request *, int); 52 53 #define BLK_RL_SYNCFULL (1U << 0) 54 #define BLK_RL_ASYNCFULL (1U << 1) 55 56 struct request_list { 57 struct request_queue *q; /* the queue this rl belongs to */ 58 #ifdef CONFIG_BLK_CGROUP 59 struct blkcg_gq *blkg; /* blkg this request pool belongs to */ 60 #endif 61 /* 62 * count[], starved[], and wait[] are indexed by 63 * BLK_RW_SYNC/BLK_RW_ASYNC 64 */ 65 int count[2]; 66 int starved[2]; 67 mempool_t *rq_pool; 68 wait_queue_head_t wait[2]; 69 unsigned int flags; 70 }; 71 72 /* 73 * request command types 74 */ 75 enum rq_cmd_type_bits { 76 REQ_TYPE_FS = 1, /* fs request */ 77 REQ_TYPE_BLOCK_PC, /* scsi command */ 78 REQ_TYPE_SENSE, /* sense request */ 79 REQ_TYPE_PM_SUSPEND, /* suspend request */ 80 REQ_TYPE_PM_RESUME, /* resume request */ 81 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 82 REQ_TYPE_SPECIAL, /* driver defined type */ 83 /* 84 * for ATA/ATAPI devices. this really doesn't belong here, ide should 85 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver 86 * private REQ_LB opcodes to differentiate what type of request this is 87 */ 88 REQ_TYPE_ATA_TASKFILE, 89 REQ_TYPE_ATA_PC, 90 }; 91 92 #define BLK_MAX_CDB 16 93 94 /* 95 * Try to put the fields that are referenced together in the same cacheline. 96 * 97 * If you modify this structure, make sure to update blk_rq_init() and 98 * especially blk_mq_rq_ctx_init() to take care of the added fields. 99 */ 100 struct request { 101 struct list_head queuelist; 102 union { 103 struct call_single_data csd; 104 unsigned long fifo_time; 105 }; 106 107 struct request_queue *q; 108 struct blk_mq_ctx *mq_ctx; 109 110 u64 cmd_flags; 111 enum rq_cmd_type_bits cmd_type; 112 unsigned long atomic_flags; 113 114 int cpu; 115 116 /* the following two fields are internal, NEVER access directly */ 117 unsigned int __data_len; /* total data len */ 118 sector_t __sector; /* sector cursor */ 119 120 struct bio *bio; 121 struct bio *biotail; 122 123 /* 124 * The hash is used inside the scheduler, and killed once the 125 * request reaches the dispatch list. The ipi_list is only used 126 * to queue the request for softirq completion, which is long 127 * after the request has been unhashed (and even removed from 128 * the dispatch list). 129 */ 130 union { 131 struct hlist_node hash; /* merge hash */ 132 struct list_head ipi_list; 133 }; 134 135 /* 136 * The rb_node is only used inside the io scheduler, requests 137 * are pruned when moved to the dispatch queue. So let the 138 * completion_data share space with the rb_node. 139 */ 140 union { 141 struct rb_node rb_node; /* sort/lookup */ 142 void *completion_data; 143 }; 144 145 /* 146 * Three pointers are available for the IO schedulers, if they need 147 * more they have to dynamically allocate it. Flush requests are 148 * never put on the IO scheduler. So let the flush fields share 149 * space with the elevator data. 150 */ 151 union { 152 struct { 153 struct io_cq *icq; 154 void *priv[2]; 155 } elv; 156 157 struct { 158 unsigned int seq; 159 struct list_head list; 160 rq_end_io_fn *saved_end_io; 161 } flush; 162 }; 163 164 struct gendisk *rq_disk; 165 struct hd_struct *part; 166 unsigned long start_time; 167 #ifdef CONFIG_BLK_CGROUP 168 struct request_list *rl; /* rl this rq is alloced from */ 169 unsigned long long start_time_ns; 170 unsigned long long io_start_time_ns; /* when passed to hardware */ 171 #endif 172 /* Number of scatter-gather DMA addr+len pairs after 173 * physical address coalescing is performed. 174 */ 175 unsigned short nr_phys_segments; 176 #if defined(CONFIG_BLK_DEV_INTEGRITY) 177 unsigned short nr_integrity_segments; 178 #endif 179 180 unsigned short ioprio; 181 182 void *special; /* opaque pointer available for LLD use */ 183 184 int tag; 185 int errors; 186 187 /* 188 * when request is used as a packet command carrier 189 */ 190 unsigned char __cmd[BLK_MAX_CDB]; 191 unsigned char *cmd; 192 unsigned short cmd_len; 193 194 unsigned int extra_len; /* length of alignment and padding */ 195 unsigned int sense_len; 196 unsigned int resid_len; /* residual count */ 197 void *sense; 198 199 unsigned long deadline; 200 struct list_head timeout_list; 201 unsigned int timeout; 202 int retries; 203 204 /* 205 * completion callback. 206 */ 207 rq_end_io_fn *end_io; 208 void *end_io_data; 209 210 /* for bidi */ 211 struct request *next_rq; 212 }; 213 214 static inline unsigned short req_get_ioprio(struct request *req) 215 { 216 return req->ioprio; 217 } 218 219 /* 220 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME 221 * requests. Some step values could eventually be made generic. 222 */ 223 struct request_pm_state 224 { 225 /* PM state machine step value, currently driver specific */ 226 int pm_step; 227 /* requested PM state value (S1, S2, S3, S4, ...) */ 228 u32 pm_state; 229 void* data; /* for driver use */ 230 }; 231 232 #include <linux/elevator.h> 233 234 struct blk_queue_ctx; 235 236 typedef void (request_fn_proc) (struct request_queue *q); 237 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); 238 typedef int (prep_rq_fn) (struct request_queue *, struct request *); 239 typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 240 241 struct bio_vec; 242 struct bvec_merge_data { 243 struct block_device *bi_bdev; 244 sector_t bi_sector; 245 unsigned bi_size; 246 unsigned long bi_rw; 247 }; 248 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, 249 struct bio_vec *); 250 typedef void (softirq_done_fn)(struct request *); 251 typedef int (dma_drain_needed_fn)(struct request *); 252 typedef int (lld_busy_fn) (struct request_queue *q); 253 typedef int (bsg_job_fn) (struct bsg_job *); 254 255 enum blk_eh_timer_return { 256 BLK_EH_NOT_HANDLED, 257 BLK_EH_HANDLED, 258 BLK_EH_RESET_TIMER, 259 }; 260 261 typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); 262 263 enum blk_queue_state { 264 Queue_down, 265 Queue_up, 266 }; 267 268 struct blk_queue_tag { 269 struct request **tag_index; /* map of busy tags */ 270 unsigned long *tag_map; /* bit map of free/busy tags */ 271 int busy; /* current depth */ 272 int max_depth; /* what we will send to device */ 273 int real_max_depth; /* what the array can hold */ 274 atomic_t refcnt; /* map can be shared */ 275 int alloc_policy; /* tag allocation policy */ 276 int next_tag; /* next tag */ 277 }; 278 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ 279 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ 280 281 #define BLK_SCSI_MAX_CMDS (256) 282 #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) 283 284 struct queue_limits { 285 unsigned long bounce_pfn; 286 unsigned long seg_boundary_mask; 287 288 unsigned int max_hw_sectors; 289 unsigned int chunk_sectors; 290 unsigned int max_sectors; 291 unsigned int max_segment_size; 292 unsigned int physical_block_size; 293 unsigned int alignment_offset; 294 unsigned int io_min; 295 unsigned int io_opt; 296 unsigned int max_discard_sectors; 297 unsigned int max_write_same_sectors; 298 unsigned int discard_granularity; 299 unsigned int discard_alignment; 300 301 unsigned short logical_block_size; 302 unsigned short max_segments; 303 unsigned short max_integrity_segments; 304 305 unsigned char misaligned; 306 unsigned char discard_misaligned; 307 unsigned char cluster; 308 unsigned char discard_zeroes_data; 309 unsigned char raid_partial_stripes_expensive; 310 }; 311 312 struct request_queue { 313 /* 314 * Together with queue_head for cacheline sharing 315 */ 316 struct list_head queue_head; 317 struct request *last_merge; 318 struct elevator_queue *elevator; 319 int nr_rqs[2]; /* # allocated [a]sync rqs */ 320 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 321 322 /* 323 * If blkcg is not used, @q->root_rl serves all requests. If blkcg 324 * is used, root blkg allocates from @q->root_rl and all other 325 * blkgs from their own blkg->rl. Which one to use should be 326 * determined using bio_request_list(). 327 */ 328 struct request_list root_rl; 329 330 request_fn_proc *request_fn; 331 make_request_fn *make_request_fn; 332 prep_rq_fn *prep_rq_fn; 333 unprep_rq_fn *unprep_rq_fn; 334 merge_bvec_fn *merge_bvec_fn; 335 softirq_done_fn *softirq_done_fn; 336 rq_timed_out_fn *rq_timed_out_fn; 337 dma_drain_needed_fn *dma_drain_needed; 338 lld_busy_fn *lld_busy_fn; 339 340 struct blk_mq_ops *mq_ops; 341 342 unsigned int *mq_map; 343 344 /* sw queues */ 345 struct blk_mq_ctx __percpu *queue_ctx; 346 unsigned int nr_queues; 347 348 /* hw dispatch queues */ 349 struct blk_mq_hw_ctx **queue_hw_ctx; 350 unsigned int nr_hw_queues; 351 352 /* 353 * Dispatch queue sorting 354 */ 355 sector_t end_sector; 356 struct request *boundary_rq; 357 358 /* 359 * Delayed queue handling 360 */ 361 struct delayed_work delay_work; 362 363 struct backing_dev_info backing_dev_info; 364 365 /* 366 * The queue owner gets to use this for whatever they like. 367 * ll_rw_blk doesn't touch it. 368 */ 369 void *queuedata; 370 371 /* 372 * various queue flags, see QUEUE_* below 373 */ 374 unsigned long queue_flags; 375 376 /* 377 * ida allocated id for this queue. Used to index queues from 378 * ioctx. 379 */ 380 int id; 381 382 /* 383 * queue needs bounce pages for pages above this limit 384 */ 385 gfp_t bounce_gfp; 386 387 /* 388 * protects queue structures from reentrancy. ->__queue_lock should 389 * _never_ be used directly, it is queue private. always use 390 * ->queue_lock. 391 */ 392 spinlock_t __queue_lock; 393 spinlock_t *queue_lock; 394 395 /* 396 * queue kobject 397 */ 398 struct kobject kobj; 399 400 /* 401 * mq queue kobject 402 */ 403 struct kobject mq_kobj; 404 405 #ifdef CONFIG_PM 406 struct device *dev; 407 int rpm_status; 408 unsigned int nr_pending; 409 #endif 410 411 /* 412 * queue settings 413 */ 414 unsigned long nr_requests; /* Max # of requests */ 415 unsigned int nr_congestion_on; 416 unsigned int nr_congestion_off; 417 unsigned int nr_batching; 418 419 unsigned int dma_drain_size; 420 void *dma_drain_buffer; 421 unsigned int dma_pad_mask; 422 unsigned int dma_alignment; 423 424 struct blk_queue_tag *queue_tags; 425 struct list_head tag_busy_list; 426 427 unsigned int nr_sorted; 428 unsigned int in_flight[2]; 429 /* 430 * Number of active block driver functions for which blk_drain_queue() 431 * must wait. Must be incremented around functions that unlock the 432 * queue_lock internally, e.g. scsi_request_fn(). 433 */ 434 unsigned int request_fn_active; 435 436 unsigned int rq_timeout; 437 struct timer_list timeout; 438 struct list_head timeout_list; 439 440 struct list_head icq_list; 441 #ifdef CONFIG_BLK_CGROUP 442 DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); 443 struct blkcg_gq *root_blkg; 444 struct list_head blkg_list; 445 #endif 446 447 struct queue_limits limits; 448 449 /* 450 * sg stuff 451 */ 452 unsigned int sg_timeout; 453 unsigned int sg_reserved_size; 454 int node; 455 #ifdef CONFIG_BLK_DEV_IO_TRACE 456 struct blk_trace *blk_trace; 457 #endif 458 /* 459 * for flush operations 460 */ 461 unsigned int flush_flags; 462 unsigned int flush_not_queueable:1; 463 struct blk_flush_queue *fq; 464 465 struct list_head requeue_list; 466 spinlock_t requeue_lock; 467 struct work_struct requeue_work; 468 469 struct mutex sysfs_lock; 470 471 int bypass_depth; 472 int mq_freeze_depth; 473 474 #if defined(CONFIG_BLK_DEV_BSG) 475 bsg_job_fn *bsg_job_fn; 476 int bsg_job_size; 477 struct bsg_class_device bsg_dev; 478 #endif 479 480 #ifdef CONFIG_BLK_DEV_THROTTLING 481 /* Throttle data */ 482 struct throtl_data *td; 483 #endif 484 struct rcu_head rcu_head; 485 wait_queue_head_t mq_freeze_wq; 486 struct percpu_ref mq_usage_counter; 487 struct list_head all_q_node; 488 489 struct blk_mq_tag_set *tag_set; 490 struct list_head tag_set_list; 491 }; 492 493 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 494 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 495 #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 496 #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 497 #define QUEUE_FLAG_DYING 5 /* queue being torn down */ 498 #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ 499 #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ 500 #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ 501 #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ 502 #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ 503 #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ 504 #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ 505 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 506 #define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ 507 #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ 508 #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ 509 #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ 510 #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ 511 #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ 512 #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 513 #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 514 #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 515 #define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */ 516 517 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 518 (1 << QUEUE_FLAG_STACKABLE) | \ 519 (1 << QUEUE_FLAG_SAME_COMP) | \ 520 (1 << QUEUE_FLAG_ADD_RANDOM)) 521 522 #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 523 (1 << QUEUE_FLAG_STACKABLE) | \ 524 (1 << QUEUE_FLAG_SAME_COMP)) 525 526 static inline void queue_lockdep_assert_held(struct request_queue *q) 527 { 528 if (q->queue_lock) 529 lockdep_assert_held(q->queue_lock); 530 } 531 532 static inline void queue_flag_set_unlocked(unsigned int flag, 533 struct request_queue *q) 534 { 535 __set_bit(flag, &q->queue_flags); 536 } 537 538 static inline int queue_flag_test_and_clear(unsigned int flag, 539 struct request_queue *q) 540 { 541 queue_lockdep_assert_held(q); 542 543 if (test_bit(flag, &q->queue_flags)) { 544 __clear_bit(flag, &q->queue_flags); 545 return 1; 546 } 547 548 return 0; 549 } 550 551 static inline int queue_flag_test_and_set(unsigned int flag, 552 struct request_queue *q) 553 { 554 queue_lockdep_assert_held(q); 555 556 if (!test_bit(flag, &q->queue_flags)) { 557 __set_bit(flag, &q->queue_flags); 558 return 0; 559 } 560 561 return 1; 562 } 563 564 static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 565 { 566 queue_lockdep_assert_held(q); 567 __set_bit(flag, &q->queue_flags); 568 } 569 570 static inline void queue_flag_clear_unlocked(unsigned int flag, 571 struct request_queue *q) 572 { 573 __clear_bit(flag, &q->queue_flags); 574 } 575 576 static inline int queue_in_flight(struct request_queue *q) 577 { 578 return q->in_flight[0] + q->in_flight[1]; 579 } 580 581 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 582 { 583 queue_lockdep_assert_held(q); 584 __clear_bit(flag, &q->queue_flags); 585 } 586 587 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 588 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 589 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) 590 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) 591 #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) 592 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) 593 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 594 #define blk_queue_noxmerges(q) \ 595 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) 596 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 597 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 598 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 599 #define blk_queue_stackable(q) \ 600 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 601 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 602 #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ 603 test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) 604 605 #define blk_noretry_request(rq) \ 606 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 607 REQ_FAILFAST_DRIVER)) 608 609 #define blk_account_rq(rq) \ 610 (((rq)->cmd_flags & REQ_STARTED) && \ 611 ((rq)->cmd_type == REQ_TYPE_FS)) 612 613 #define blk_pm_request(rq) \ 614 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ 615 (rq)->cmd_type == REQ_TYPE_PM_RESUME) 616 617 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 618 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 619 /* rq->queuelist of dequeued request must be list_empty() */ 620 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) 621 622 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 623 624 #define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0) 625 626 /* 627 * Driver can handle struct request, if it either has an old style 628 * request_fn defined, or is blk-mq based. 629 */ 630 static inline bool queue_is_rq_based(struct request_queue *q) 631 { 632 return q->request_fn || q->mq_ops; 633 } 634 635 static inline unsigned int blk_queue_cluster(struct request_queue *q) 636 { 637 return q->limits.cluster; 638 } 639 640 /* 641 * We regard a request as sync, if either a read or a sync write 642 */ 643 static inline bool rw_is_sync(unsigned int rw_flags) 644 { 645 return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); 646 } 647 648 static inline bool rq_is_sync(struct request *rq) 649 { 650 return rw_is_sync(rq->cmd_flags); 651 } 652 653 static inline bool blk_rl_full(struct request_list *rl, bool sync) 654 { 655 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 656 657 return rl->flags & flag; 658 } 659 660 static inline void blk_set_rl_full(struct request_list *rl, bool sync) 661 { 662 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 663 664 rl->flags |= flag; 665 } 666 667 static inline void blk_clear_rl_full(struct request_list *rl, bool sync) 668 { 669 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 670 671 rl->flags &= ~flag; 672 } 673 674 static inline bool rq_mergeable(struct request *rq) 675 { 676 if (rq->cmd_type != REQ_TYPE_FS) 677 return false; 678 679 if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 680 return false; 681 682 return true; 683 } 684 685 static inline bool blk_check_merge_flags(unsigned int flags1, 686 unsigned int flags2) 687 { 688 if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) 689 return false; 690 691 if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) 692 return false; 693 694 if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME)) 695 return false; 696 697 return true; 698 } 699 700 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) 701 { 702 if (bio_data(a) == bio_data(b)) 703 return true; 704 705 return false; 706 } 707 708 /* 709 * q->prep_rq_fn return values 710 */ 711 #define BLKPREP_OK 0 /* serve it */ 712 #define BLKPREP_KILL 1 /* fatal error, kill */ 713 #define BLKPREP_DEFER 2 /* leave on queue */ 714 715 extern unsigned long blk_max_low_pfn, blk_max_pfn; 716 717 /* 718 * standard bounce addresses: 719 * 720 * BLK_BOUNCE_HIGH : bounce all highmem pages 721 * BLK_BOUNCE_ANY : don't bounce anything 722 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary 723 */ 724 725 #if BITS_PER_LONG == 32 726 #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) 727 #else 728 #define BLK_BOUNCE_HIGH -1ULL 729 #endif 730 #define BLK_BOUNCE_ANY (-1ULL) 731 #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) 732 733 /* 734 * default timeout for SG_IO if none specified 735 */ 736 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) 737 #define BLK_MIN_SG_TIMEOUT (7 * HZ) 738 739 #ifdef CONFIG_BOUNCE 740 extern int init_emergency_isa_pool(void); 741 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); 742 #else 743 static inline int init_emergency_isa_pool(void) 744 { 745 return 0; 746 } 747 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) 748 { 749 } 750 #endif /* CONFIG_MMU */ 751 752 struct rq_map_data { 753 struct page **pages; 754 int page_order; 755 int nr_entries; 756 unsigned long offset; 757 int null_mapped; 758 int from_user; 759 }; 760 761 struct req_iterator { 762 struct bvec_iter iter; 763 struct bio *bio; 764 }; 765 766 /* This should not be used directly - use rq_for_each_segment */ 767 #define for_each_bio(_bio) \ 768 for (; _bio; _bio = _bio->bi_next) 769 #define __rq_for_each_bio(_bio, rq) \ 770 if ((rq->bio)) \ 771 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) 772 773 #define rq_for_each_segment(bvl, _rq, _iter) \ 774 __rq_for_each_bio(_iter.bio, _rq) \ 775 bio_for_each_segment(bvl, _iter.bio, _iter.iter) 776 777 #define rq_iter_last(bvec, _iter) \ 778 (_iter.bio->bi_next == NULL && \ 779 bio_iter_last(bvec, _iter.iter)) 780 781 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 782 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 783 #endif 784 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 785 extern void rq_flush_dcache_pages(struct request *rq); 786 #else 787 static inline void rq_flush_dcache_pages(struct request *rq) 788 { 789 } 790 #endif 791 792 extern int blk_register_queue(struct gendisk *disk); 793 extern void blk_unregister_queue(struct gendisk *disk); 794 extern void generic_make_request(struct bio *bio); 795 extern void blk_rq_init(struct request_queue *q, struct request *rq); 796 extern void blk_put_request(struct request *); 797 extern void __blk_put_request(struct request_queue *, struct request *); 798 extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 799 extern struct request *blk_make_request(struct request_queue *, struct bio *, 800 gfp_t); 801 extern void blk_rq_set_block_pc(struct request *); 802 extern void blk_requeue_request(struct request_queue *, struct request *); 803 extern void blk_add_request_payload(struct request *rq, struct page *page, 804 unsigned int len); 805 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 806 extern int blk_lld_busy(struct request_queue *q); 807 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 808 struct bio_set *bs, gfp_t gfp_mask, 809 int (*bio_ctr)(struct bio *, struct bio *, void *), 810 void *data); 811 extern void blk_rq_unprep_clone(struct request *rq); 812 extern int blk_insert_cloned_request(struct request_queue *q, 813 struct request *rq); 814 extern void blk_delay_queue(struct request_queue *, unsigned long); 815 extern void blk_recount_segments(struct request_queue *, struct bio *); 816 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); 817 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, 818 unsigned int, void __user *); 819 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, 820 unsigned int, void __user *); 821 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 822 struct scsi_ioctl_command __user *); 823 824 extern void blk_queue_bio(struct request_queue *q, struct bio *bio); 825 826 /* 827 * A queue has just exitted congestion. Note this in the global counter of 828 * congested queues, and wake up anyone who was waiting for requests to be 829 * put back. 830 */ 831 static inline void blk_clear_queue_congested(struct request_queue *q, int sync) 832 { 833 clear_bdi_congested(&q->backing_dev_info, sync); 834 } 835 836 /* 837 * A queue has just entered congestion. Flag that in the queue's VM-visible 838 * state flags and increment the global gounter of congested queues. 839 */ 840 static inline void blk_set_queue_congested(struct request_queue *q, int sync) 841 { 842 set_bdi_congested(&q->backing_dev_info, sync); 843 } 844 845 extern void blk_start_queue(struct request_queue *q); 846 extern void blk_stop_queue(struct request_queue *q); 847 extern void blk_sync_queue(struct request_queue *q); 848 extern void __blk_stop_queue(struct request_queue *q); 849 extern void __blk_run_queue(struct request_queue *q); 850 extern void blk_run_queue(struct request_queue *); 851 extern void blk_run_queue_async(struct request_queue *q); 852 extern int blk_rq_map_user(struct request_queue *, struct request *, 853 struct rq_map_data *, void __user *, unsigned long, 854 gfp_t); 855 extern int blk_rq_unmap_user(struct bio *); 856 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 857 extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 858 struct rq_map_data *, const struct iov_iter *, 859 gfp_t); 860 extern int blk_execute_rq(struct request_queue *, struct gendisk *, 861 struct request *, int); 862 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 863 struct request *, int, rq_end_io_fn *); 864 865 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 866 { 867 return bdev->bd_disk->queue; /* this is never NULL */ 868 } 869 870 /* 871 * blk_rq_pos() : the current sector 872 * blk_rq_bytes() : bytes left in the entire request 873 * blk_rq_cur_bytes() : bytes left in the current segment 874 * blk_rq_err_bytes() : bytes left till the next error boundary 875 * blk_rq_sectors() : sectors left in the entire request 876 * blk_rq_cur_sectors() : sectors left in the current segment 877 */ 878 static inline sector_t blk_rq_pos(const struct request *rq) 879 { 880 return rq->__sector; 881 } 882 883 static inline unsigned int blk_rq_bytes(const struct request *rq) 884 { 885 return rq->__data_len; 886 } 887 888 static inline int blk_rq_cur_bytes(const struct request *rq) 889 { 890 return rq->bio ? bio_cur_bytes(rq->bio) : 0; 891 } 892 893 extern unsigned int blk_rq_err_bytes(const struct request *rq); 894 895 static inline unsigned int blk_rq_sectors(const struct request *rq) 896 { 897 return blk_rq_bytes(rq) >> 9; 898 } 899 900 static inline unsigned int blk_rq_cur_sectors(const struct request *rq) 901 { 902 return blk_rq_cur_bytes(rq) >> 9; 903 } 904 905 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 906 unsigned int cmd_flags) 907 { 908 if (unlikely(cmd_flags & REQ_DISCARD)) 909 return min(q->limits.max_discard_sectors, UINT_MAX >> 9); 910 911 if (unlikely(cmd_flags & REQ_WRITE_SAME)) 912 return q->limits.max_write_same_sectors; 913 914 return q->limits.max_sectors; 915 } 916 917 /* 918 * Return maximum size of a request at given offset. Only valid for 919 * file system requests. 920 */ 921 static inline unsigned int blk_max_size_offset(struct request_queue *q, 922 sector_t offset) 923 { 924 if (!q->limits.chunk_sectors) 925 return q->limits.max_sectors; 926 927 return q->limits.chunk_sectors - 928 (offset & (q->limits.chunk_sectors - 1)); 929 } 930 931 static inline unsigned int blk_rq_get_max_sectors(struct request *rq) 932 { 933 struct request_queue *q = rq->q; 934 935 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) 936 return q->limits.max_hw_sectors; 937 938 if (!q->limits.chunk_sectors) 939 return blk_queue_get_max_sectors(q, rq->cmd_flags); 940 941 return min(blk_max_size_offset(q, blk_rq_pos(rq)), 942 blk_queue_get_max_sectors(q, rq->cmd_flags)); 943 } 944 945 static inline unsigned int blk_rq_count_bios(struct request *rq) 946 { 947 unsigned int nr_bios = 0; 948 struct bio *bio; 949 950 __rq_for_each_bio(bio, rq) 951 nr_bios++; 952 953 return nr_bios; 954 } 955 956 /* 957 * Request issue related functions. 958 */ 959 extern struct request *blk_peek_request(struct request_queue *q); 960 extern void blk_start_request(struct request *rq); 961 extern struct request *blk_fetch_request(struct request_queue *q); 962 963 /* 964 * Request completion related functions. 965 * 966 * blk_update_request() completes given number of bytes and updates 967 * the request without completing it. 968 * 969 * blk_end_request() and friends. __blk_end_request() must be called 970 * with the request queue spinlock acquired. 971 * 972 * Several drivers define their own end_request and call 973 * blk_end_request() for parts of the original function. 974 * This prevents code duplication in drivers. 975 */ 976 extern bool blk_update_request(struct request *rq, int error, 977 unsigned int nr_bytes); 978 extern void blk_finish_request(struct request *rq, int error); 979 extern bool blk_end_request(struct request *rq, int error, 980 unsigned int nr_bytes); 981 extern void blk_end_request_all(struct request *rq, int error); 982 extern bool blk_end_request_cur(struct request *rq, int error); 983 extern bool blk_end_request_err(struct request *rq, int error); 984 extern bool __blk_end_request(struct request *rq, int error, 985 unsigned int nr_bytes); 986 extern void __blk_end_request_all(struct request *rq, int error); 987 extern bool __blk_end_request_cur(struct request *rq, int error); 988 extern bool __blk_end_request_err(struct request *rq, int error); 989 990 extern void blk_complete_request(struct request *); 991 extern void __blk_complete_request(struct request *); 992 extern void blk_abort_request(struct request *); 993 extern void blk_unprep_request(struct request *); 994 995 /* 996 * Access functions for manipulating queue properties 997 */ 998 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 999 spinlock_t *lock, int node_id); 1000 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 1001 extern struct request_queue *blk_init_allocated_queue(struct request_queue *, 1002 request_fn_proc *, spinlock_t *); 1003 extern void blk_cleanup_queue(struct request_queue *); 1004 extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 1005 extern void blk_queue_bounce_limit(struct request_queue *, u64); 1006 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); 1007 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 1008 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); 1009 extern void blk_queue_max_segments(struct request_queue *, unsigned short); 1010 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 1011 extern void blk_queue_max_discard_sectors(struct request_queue *q, 1012 unsigned int max_discard_sectors); 1013 extern void blk_queue_max_write_same_sectors(struct request_queue *q, 1014 unsigned int max_write_same_sectors); 1015 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 1016 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); 1017 extern void blk_queue_alignment_offset(struct request_queue *q, 1018 unsigned int alignment); 1019 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); 1020 extern void blk_queue_io_min(struct request_queue *q, unsigned int min); 1021 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); 1022 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); 1023 extern void blk_set_default_limits(struct queue_limits *lim); 1024 extern void blk_set_stacking_limits(struct queue_limits *lim); 1025 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 1026 sector_t offset); 1027 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, 1028 sector_t offset); 1029 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 1030 sector_t offset); 1031 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 1032 extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 1033 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); 1034 extern int blk_queue_dma_drain(struct request_queue *q, 1035 dma_drain_needed_fn *dma_drain_needed, 1036 void *buf, unsigned int size); 1037 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 1038 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 1039 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 1040 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); 1041 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 1042 extern void blk_queue_dma_alignment(struct request_queue *, int); 1043 extern void blk_queue_update_dma_alignment(struct request_queue *, int); 1044 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 1045 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 1046 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 1047 extern void blk_queue_flush(struct request_queue *q, unsigned int flush); 1048 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); 1049 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 1050 1051 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 1052 extern void blk_dump_rq_flags(struct request *, char *); 1053 extern long nr_blockdev_pages(void); 1054 1055 bool __must_check blk_get_queue(struct request_queue *); 1056 struct request_queue *blk_alloc_queue(gfp_t); 1057 struct request_queue *blk_alloc_queue_node(gfp_t, int); 1058 extern void blk_put_queue(struct request_queue *); 1059 1060 /* 1061 * block layer runtime pm functions 1062 */ 1063 #ifdef CONFIG_PM 1064 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); 1065 extern int blk_pre_runtime_suspend(struct request_queue *q); 1066 extern void blk_post_runtime_suspend(struct request_queue *q, int err); 1067 extern void blk_pre_runtime_resume(struct request_queue *q); 1068 extern void blk_post_runtime_resume(struct request_queue *q, int err); 1069 #else 1070 static inline void blk_pm_runtime_init(struct request_queue *q, 1071 struct device *dev) {} 1072 static inline int blk_pre_runtime_suspend(struct request_queue *q) 1073 { 1074 return -ENOSYS; 1075 } 1076 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} 1077 static inline void blk_pre_runtime_resume(struct request_queue *q) {} 1078 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} 1079 #endif 1080 1081 /* 1082 * blk_plug permits building a queue of related requests by holding the I/O 1083 * fragments for a short period. This allows merging of sequential requests 1084 * into single larger request. As the requests are moved from a per-task list to 1085 * the device's request_queue in a batch, this results in improved scalability 1086 * as the lock contention for request_queue lock is reduced. 1087 * 1088 * It is ok not to disable preemption when adding the request to the plug list 1089 * or when attempting a merge, because blk_schedule_flush_list() will only flush 1090 * the plug list when the task sleeps by itself. For details, please see 1091 * schedule() where blk_schedule_flush_plug() is called. 1092 */ 1093 struct blk_plug { 1094 struct list_head list; /* requests */ 1095 struct list_head mq_list; /* blk-mq requests */ 1096 struct list_head cb_list; /* md requires an unplug callback */ 1097 }; 1098 #define BLK_MAX_REQUEST_COUNT 16 1099 1100 struct blk_plug_cb; 1101 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); 1102 struct blk_plug_cb { 1103 struct list_head list; 1104 blk_plug_cb_fn callback; 1105 void *data; 1106 }; 1107 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, 1108 void *data, int size); 1109 extern void blk_start_plug(struct blk_plug *); 1110 extern void blk_finish_plug(struct blk_plug *); 1111 extern void blk_flush_plug_list(struct blk_plug *, bool); 1112 1113 static inline void blk_flush_plug(struct task_struct *tsk) 1114 { 1115 struct blk_plug *plug = tsk->plug; 1116 1117 if (plug) 1118 blk_flush_plug_list(plug, false); 1119 } 1120 1121 static inline void blk_schedule_flush_plug(struct task_struct *tsk) 1122 { 1123 struct blk_plug *plug = tsk->plug; 1124 1125 if (plug) 1126 blk_flush_plug_list(plug, true); 1127 } 1128 1129 static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1130 { 1131 struct blk_plug *plug = tsk->plug; 1132 1133 return plug && 1134 (!list_empty(&plug->list) || 1135 !list_empty(&plug->mq_list) || 1136 !list_empty(&plug->cb_list)); 1137 } 1138 1139 /* 1140 * tag stuff 1141 */ 1142 extern int blk_queue_start_tag(struct request_queue *, struct request *); 1143 extern struct request *blk_queue_find_tag(struct request_queue *, int); 1144 extern void blk_queue_end_tag(struct request_queue *, struct request *); 1145 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); 1146 extern void blk_queue_free_tags(struct request_queue *); 1147 extern int blk_queue_resize_tags(struct request_queue *, int); 1148 extern void blk_queue_invalidate_tags(struct request_queue *); 1149 extern struct blk_queue_tag *blk_init_tags(int, int); 1150 extern void blk_free_tags(struct blk_queue_tag *); 1151 1152 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, 1153 int tag) 1154 { 1155 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) 1156 return NULL; 1157 return bqt->tag_index[tag]; 1158 } 1159 1160 #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ 1161 1162 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); 1163 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1164 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 1165 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, 1166 sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1167 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1168 sector_t nr_sects, gfp_t gfp_mask, bool discard); 1169 static inline int sb_issue_discard(struct super_block *sb, sector_t block, 1170 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) 1171 { 1172 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), 1173 nr_blocks << (sb->s_blocksize_bits - 9), 1174 gfp_mask, flags); 1175 } 1176 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, 1177 sector_t nr_blocks, gfp_t gfp_mask) 1178 { 1179 return blkdev_issue_zeroout(sb->s_bdev, 1180 block << (sb->s_blocksize_bits - 9), 1181 nr_blocks << (sb->s_blocksize_bits - 9), 1182 gfp_mask, true); 1183 } 1184 1185 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 1186 1187 enum blk_default_limits { 1188 BLK_MAX_SEGMENTS = 128, 1189 BLK_SAFE_MAX_SECTORS = 255, 1190 BLK_MAX_SEGMENT_SIZE = 65536, 1191 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1192 }; 1193 1194 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 1195 1196 static inline unsigned long queue_bounce_pfn(struct request_queue *q) 1197 { 1198 return q->limits.bounce_pfn; 1199 } 1200 1201 static inline unsigned long queue_segment_boundary(struct request_queue *q) 1202 { 1203 return q->limits.seg_boundary_mask; 1204 } 1205 1206 static inline unsigned int queue_max_sectors(struct request_queue *q) 1207 { 1208 return q->limits.max_sectors; 1209 } 1210 1211 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) 1212 { 1213 return q->limits.max_hw_sectors; 1214 } 1215 1216 static inline unsigned short queue_max_segments(struct request_queue *q) 1217 { 1218 return q->limits.max_segments; 1219 } 1220 1221 static inline unsigned int queue_max_segment_size(struct request_queue *q) 1222 { 1223 return q->limits.max_segment_size; 1224 } 1225 1226 static inline unsigned short queue_logical_block_size(struct request_queue *q) 1227 { 1228 int retval = 512; 1229 1230 if (q && q->limits.logical_block_size) 1231 retval = q->limits.logical_block_size; 1232 1233 return retval; 1234 } 1235 1236 static inline unsigned short bdev_logical_block_size(struct block_device *bdev) 1237 { 1238 return queue_logical_block_size(bdev_get_queue(bdev)); 1239 } 1240 1241 static inline unsigned int queue_physical_block_size(struct request_queue *q) 1242 { 1243 return q->limits.physical_block_size; 1244 } 1245 1246 static inline unsigned int bdev_physical_block_size(struct block_device *bdev) 1247 { 1248 return queue_physical_block_size(bdev_get_queue(bdev)); 1249 } 1250 1251 static inline unsigned int queue_io_min(struct request_queue *q) 1252 { 1253 return q->limits.io_min; 1254 } 1255 1256 static inline int bdev_io_min(struct block_device *bdev) 1257 { 1258 return queue_io_min(bdev_get_queue(bdev)); 1259 } 1260 1261 static inline unsigned int queue_io_opt(struct request_queue *q) 1262 { 1263 return q->limits.io_opt; 1264 } 1265 1266 static inline int bdev_io_opt(struct block_device *bdev) 1267 { 1268 return queue_io_opt(bdev_get_queue(bdev)); 1269 } 1270 1271 static inline int queue_alignment_offset(struct request_queue *q) 1272 { 1273 if (q->limits.misaligned) 1274 return -1; 1275 1276 return q->limits.alignment_offset; 1277 } 1278 1279 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) 1280 { 1281 unsigned int granularity = max(lim->physical_block_size, lim->io_min); 1282 unsigned int alignment = sector_div(sector, granularity >> 9) << 9; 1283 1284 return (granularity + lim->alignment_offset - alignment) % granularity; 1285 } 1286 1287 static inline int bdev_alignment_offset(struct block_device *bdev) 1288 { 1289 struct request_queue *q = bdev_get_queue(bdev); 1290 1291 if (q->limits.misaligned) 1292 return -1; 1293 1294 if (bdev != bdev->bd_contains) 1295 return bdev->bd_part->alignment_offset; 1296 1297 return q->limits.alignment_offset; 1298 } 1299 1300 static inline int queue_discard_alignment(struct request_queue *q) 1301 { 1302 if (q->limits.discard_misaligned) 1303 return -1; 1304 1305 return q->limits.discard_alignment; 1306 } 1307 1308 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 1309 { 1310 unsigned int alignment, granularity, offset; 1311 1312 if (!lim->max_discard_sectors) 1313 return 0; 1314 1315 /* Why are these in bytes, not sectors? */ 1316 alignment = lim->discard_alignment >> 9; 1317 granularity = lim->discard_granularity >> 9; 1318 if (!granularity) 1319 return 0; 1320 1321 /* Offset of the partition start in 'granularity' sectors */ 1322 offset = sector_div(sector, granularity); 1323 1324 /* And why do we do this modulus *again* in blkdev_issue_discard()? */ 1325 offset = (granularity + alignment - offset) % granularity; 1326 1327 /* Turn it back into bytes, gaah */ 1328 return offset << 9; 1329 } 1330 1331 static inline int bdev_discard_alignment(struct block_device *bdev) 1332 { 1333 struct request_queue *q = bdev_get_queue(bdev); 1334 1335 if (bdev != bdev->bd_contains) 1336 return bdev->bd_part->discard_alignment; 1337 1338 return q->limits.discard_alignment; 1339 } 1340 1341 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 1342 { 1343 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) 1344 return 1; 1345 1346 return 0; 1347 } 1348 1349 static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) 1350 { 1351 return queue_discard_zeroes_data(bdev_get_queue(bdev)); 1352 } 1353 1354 static inline unsigned int bdev_write_same(struct block_device *bdev) 1355 { 1356 struct request_queue *q = bdev_get_queue(bdev); 1357 1358 if (q) 1359 return q->limits.max_write_same_sectors; 1360 1361 return 0; 1362 } 1363 1364 static inline int queue_dma_alignment(struct request_queue *q) 1365 { 1366 return q ? q->dma_alignment : 511; 1367 } 1368 1369 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, 1370 unsigned int len) 1371 { 1372 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; 1373 return !(addr & alignment) && !(len & alignment); 1374 } 1375 1376 /* assumes size > 256 */ 1377 static inline unsigned int blksize_bits(unsigned int size) 1378 { 1379 unsigned int bits = 8; 1380 do { 1381 bits++; 1382 size >>= 1; 1383 } while (size > 256); 1384 return bits; 1385 } 1386 1387 static inline unsigned int block_size(struct block_device *bdev) 1388 { 1389 return bdev->bd_block_size; 1390 } 1391 1392 static inline bool queue_flush_queueable(struct request_queue *q) 1393 { 1394 return !q->flush_not_queueable; 1395 } 1396 1397 typedef struct {struct page *v;} Sector; 1398 1399 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 1400 1401 static inline void put_dev_sector(Sector p) 1402 { 1403 page_cache_release(p.v); 1404 } 1405 1406 struct work_struct; 1407 int kblockd_schedule_work(struct work_struct *work); 1408 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 1409 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 1410 1411 #ifdef CONFIG_BLK_CGROUP 1412 /* 1413 * This should not be using sched_clock(). A real patch is in progress 1414 * to fix this up, until that is in place we need to disable preemption 1415 * around sched_clock() in this function and set_io_start_time_ns(). 1416 */ 1417 static inline void set_start_time_ns(struct request *req) 1418 { 1419 preempt_disable(); 1420 req->start_time_ns = sched_clock(); 1421 preempt_enable(); 1422 } 1423 1424 static inline void set_io_start_time_ns(struct request *req) 1425 { 1426 preempt_disable(); 1427 req->io_start_time_ns = sched_clock(); 1428 preempt_enable(); 1429 } 1430 1431 static inline uint64_t rq_start_time_ns(struct request *req) 1432 { 1433 return req->start_time_ns; 1434 } 1435 1436 static inline uint64_t rq_io_start_time_ns(struct request *req) 1437 { 1438 return req->io_start_time_ns; 1439 } 1440 #else 1441 static inline void set_start_time_ns(struct request *req) {} 1442 static inline void set_io_start_time_ns(struct request *req) {} 1443 static inline uint64_t rq_start_time_ns(struct request *req) 1444 { 1445 return 0; 1446 } 1447 static inline uint64_t rq_io_start_time_ns(struct request *req) 1448 { 1449 return 0; 1450 } 1451 #endif 1452 1453 #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1454 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 1455 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 1456 MODULE_ALIAS("block-major-" __stringify(major) "-*") 1457 1458 #if defined(CONFIG_BLK_DEV_INTEGRITY) 1459 1460 enum blk_integrity_flags { 1461 BLK_INTEGRITY_VERIFY = 1 << 0, 1462 BLK_INTEGRITY_GENERATE = 1 << 1, 1463 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, 1464 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, 1465 }; 1466 1467 struct blk_integrity_iter { 1468 void *prot_buf; 1469 void *data_buf; 1470 sector_t seed; 1471 unsigned int data_size; 1472 unsigned short interval; 1473 const char *disk_name; 1474 }; 1475 1476 typedef int (integrity_processing_fn) (struct blk_integrity_iter *); 1477 1478 struct blk_integrity { 1479 integrity_processing_fn *generate_fn; 1480 integrity_processing_fn *verify_fn; 1481 1482 unsigned short flags; 1483 unsigned short tuple_size; 1484 unsigned short interval; 1485 unsigned short tag_size; 1486 1487 const char *name; 1488 1489 struct kobject kobj; 1490 }; 1491 1492 extern bool blk_integrity_is_initialized(struct gendisk *); 1493 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 1494 extern void blk_integrity_unregister(struct gendisk *); 1495 extern int blk_integrity_compare(struct gendisk *, struct gendisk *); 1496 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, 1497 struct scatterlist *); 1498 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); 1499 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, 1500 struct request *); 1501 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, 1502 struct bio *); 1503 1504 static inline 1505 struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 1506 { 1507 return bdev->bd_disk->integrity; 1508 } 1509 1510 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1511 { 1512 return disk->integrity; 1513 } 1514 1515 static inline bool blk_integrity_rq(struct request *rq) 1516 { 1517 return rq->cmd_flags & REQ_INTEGRITY; 1518 } 1519 1520 static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1521 unsigned int segs) 1522 { 1523 q->limits.max_integrity_segments = segs; 1524 } 1525 1526 static inline unsigned short 1527 queue_max_integrity_segments(struct request_queue *q) 1528 { 1529 return q->limits.max_integrity_segments; 1530 } 1531 1532 #else /* CONFIG_BLK_DEV_INTEGRITY */ 1533 1534 struct bio; 1535 struct block_device; 1536 struct gendisk; 1537 struct blk_integrity; 1538 1539 static inline int blk_integrity_rq(struct request *rq) 1540 { 1541 return 0; 1542 } 1543 static inline int blk_rq_count_integrity_sg(struct request_queue *q, 1544 struct bio *b) 1545 { 1546 return 0; 1547 } 1548 static inline int blk_rq_map_integrity_sg(struct request_queue *q, 1549 struct bio *b, 1550 struct scatterlist *s) 1551 { 1552 return 0; 1553 } 1554 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) 1555 { 1556 return NULL; 1557 } 1558 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1559 { 1560 return NULL; 1561 } 1562 static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) 1563 { 1564 return 0; 1565 } 1566 static inline int blk_integrity_register(struct gendisk *d, 1567 struct blk_integrity *b) 1568 { 1569 return 0; 1570 } 1571 static inline void blk_integrity_unregister(struct gendisk *d) 1572 { 1573 } 1574 static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1575 unsigned int segs) 1576 { 1577 } 1578 static inline unsigned short queue_max_integrity_segments(struct request_queue *q) 1579 { 1580 return 0; 1581 } 1582 static inline bool blk_integrity_merge_rq(struct request_queue *rq, 1583 struct request *r1, 1584 struct request *r2) 1585 { 1586 return true; 1587 } 1588 static inline bool blk_integrity_merge_bio(struct request_queue *rq, 1589 struct request *r, 1590 struct bio *b) 1591 { 1592 return true; 1593 } 1594 static inline bool blk_integrity_is_initialized(struct gendisk *g) 1595 { 1596 return 0; 1597 } 1598 1599 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1600 1601 struct block_device_operations { 1602 int (*open) (struct block_device *, fmode_t); 1603 void (*release) (struct gendisk *, fmode_t); 1604 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); 1605 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1606 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1607 long (*direct_access)(struct block_device *, sector_t, 1608 void **, unsigned long *pfn, long size); 1609 unsigned int (*check_events) (struct gendisk *disk, 1610 unsigned int clearing); 1611 /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 1612 int (*media_changed) (struct gendisk *); 1613 void (*unlock_native_capacity) (struct gendisk *); 1614 int (*revalidate_disk) (struct gendisk *); 1615 int (*getgeo)(struct block_device *, struct hd_geometry *); 1616 /* this callback is with swap_lock and sometimes page table lock held */ 1617 void (*swap_slot_free_notify) (struct block_device *, unsigned long); 1618 struct module *owner; 1619 }; 1620 1621 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, 1622 unsigned long); 1623 extern int bdev_read_page(struct block_device *, sector_t, struct page *); 1624 extern int bdev_write_page(struct block_device *, sector_t, struct page *, 1625 struct writeback_control *); 1626 extern long bdev_direct_access(struct block_device *, sector_t, void **addr, 1627 unsigned long *pfn, long size); 1628 #else /* CONFIG_BLOCK */ 1629 1630 struct block_device; 1631 1632 /* 1633 * stubs for when the block layer is configured out 1634 */ 1635 #define buffer_heads_over_limit 0 1636 1637 static inline long nr_blockdev_pages(void) 1638 { 1639 return 0; 1640 } 1641 1642 struct blk_plug { 1643 }; 1644 1645 static inline void blk_start_plug(struct blk_plug *plug) 1646 { 1647 } 1648 1649 static inline void blk_finish_plug(struct blk_plug *plug) 1650 { 1651 } 1652 1653 static inline void blk_flush_plug(struct task_struct *task) 1654 { 1655 } 1656 1657 static inline void blk_schedule_flush_plug(struct task_struct *task) 1658 { 1659 } 1660 1661 1662 static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1663 { 1664 return false; 1665 } 1666 1667 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, 1668 sector_t *error_sector) 1669 { 1670 return 0; 1671 } 1672 1673 #endif /* CONFIG_BLOCK */ 1674 1675 #endif 1676