1 #ifndef _LINUX_BLKDEV_H 2 #define _LINUX_BLKDEV_H 3 4 #include <linux/sched.h> 5 6 #ifdef CONFIG_BLOCK 7 8 #include <linux/major.h> 9 #include <linux/genhd.h> 10 #include <linux/list.h> 11 #include <linux/timer.h> 12 #include <linux/workqueue.h> 13 #include <linux/pagemap.h> 14 #include <linux/backing-dev.h> 15 #include <linux/wait.h> 16 #include <linux/mempool.h> 17 #include <linux/bio.h> 18 #include <linux/stringify.h> 19 #include <linux/gfp.h> 20 #include <linux/bsg.h> 21 #include <linux/smp.h> 22 23 #include <asm/scatterlist.h> 24 25 struct module; 26 struct scsi_ioctl_command; 27 28 struct request_queue; 29 struct elevator_queue; 30 struct request_pm_state; 31 struct blk_trace; 32 struct request; 33 struct sg_io_hdr; 34 struct bsg_job; 35 struct blkcg_gq; 36 37 #define BLKDEV_MIN_RQ 4 38 #define BLKDEV_MAX_RQ 128 /* Default maximum */ 39 40 /* 41 * Maximum number of blkcg policies allowed to be registered concurrently. 42 * Defined here to simplify include dependency. 43 */ 44 #define BLKCG_MAX_POLS 2 45 46 struct request; 47 typedef void (rq_end_io_fn)(struct request *, int); 48 49 #define BLK_RL_SYNCFULL (1U << 0) 50 #define BLK_RL_ASYNCFULL (1U << 1) 51 52 struct request_list { 53 struct request_queue *q; /* the queue this rl belongs to */ 54 #ifdef CONFIG_BLK_CGROUP 55 struct blkcg_gq *blkg; /* blkg this request pool belongs to */ 56 #endif 57 /* 58 * count[], starved[], and wait[] are indexed by 59 * BLK_RW_SYNC/BLK_RW_ASYNC 60 */ 61 int count[2]; 62 int starved[2]; 63 mempool_t *rq_pool; 64 wait_queue_head_t wait[2]; 65 unsigned int flags; 66 }; 67 68 /* 69 * request command types 70 */ 71 enum rq_cmd_type_bits { 72 REQ_TYPE_FS = 1, /* fs request */ 73 REQ_TYPE_BLOCK_PC, /* scsi command */ 74 REQ_TYPE_SENSE, /* sense request */ 75 REQ_TYPE_PM_SUSPEND, /* suspend request */ 76 REQ_TYPE_PM_RESUME, /* resume request */ 77 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 78 REQ_TYPE_SPECIAL, /* driver defined type */ 79 /* 80 * for ATA/ATAPI devices. this really doesn't belong here, ide should 81 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver 82 * private REQ_LB opcodes to differentiate what type of request this is 83 */ 84 REQ_TYPE_ATA_TASKFILE, 85 REQ_TYPE_ATA_PC, 86 }; 87 88 #define BLK_MAX_CDB 16 89 90 /* 91 * try to put the fields that are referenced together in the same cacheline. 92 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() 93 * as well! 94 */ 95 struct request { 96 struct list_head queuelist; 97 struct call_single_data csd; 98 99 struct request_queue *q; 100 101 unsigned int cmd_flags; 102 enum rq_cmd_type_bits cmd_type; 103 unsigned long atomic_flags; 104 105 int cpu; 106 107 /* the following two fields are internal, NEVER access directly */ 108 unsigned int __data_len; /* total data len */ 109 sector_t __sector; /* sector cursor */ 110 111 struct bio *bio; 112 struct bio *biotail; 113 114 struct hlist_node hash; /* merge hash */ 115 /* 116 * The rb_node is only used inside the io scheduler, requests 117 * are pruned when moved to the dispatch queue. So let the 118 * completion_data share space with the rb_node. 119 */ 120 union { 121 struct rb_node rb_node; /* sort/lookup */ 122 void *completion_data; 123 }; 124 125 /* 126 * Three pointers are available for the IO schedulers, if they need 127 * more they have to dynamically allocate it. Flush requests are 128 * never put on the IO scheduler. So let the flush fields share 129 * space with the elevator data. 130 */ 131 union { 132 struct { 133 struct io_cq *icq; 134 void *priv[2]; 135 } elv; 136 137 struct { 138 unsigned int seq; 139 struct list_head list; 140 rq_end_io_fn *saved_end_io; 141 } flush; 142 }; 143 144 struct gendisk *rq_disk; 145 struct hd_struct *part; 146 unsigned long start_time; 147 #ifdef CONFIG_BLK_CGROUP 148 struct request_list *rl; /* rl this rq is alloced from */ 149 unsigned long long start_time_ns; 150 unsigned long long io_start_time_ns; /* when passed to hardware */ 151 #endif 152 /* Number of scatter-gather DMA addr+len pairs after 153 * physical address coalescing is performed. 154 */ 155 unsigned short nr_phys_segments; 156 #if defined(CONFIG_BLK_DEV_INTEGRITY) 157 unsigned short nr_integrity_segments; 158 #endif 159 160 unsigned short ioprio; 161 162 int ref_count; 163 164 void *special; /* opaque pointer available for LLD use */ 165 char *buffer; /* kaddr of the current segment if available */ 166 167 int tag; 168 int errors; 169 170 /* 171 * when request is used as a packet command carrier 172 */ 173 unsigned char __cmd[BLK_MAX_CDB]; 174 unsigned char *cmd; 175 unsigned short cmd_len; 176 177 unsigned int extra_len; /* length of alignment and padding */ 178 unsigned int sense_len; 179 unsigned int resid_len; /* residual count */ 180 void *sense; 181 182 unsigned long deadline; 183 struct list_head timeout_list; 184 unsigned int timeout; 185 int retries; 186 187 /* 188 * completion callback. 189 */ 190 rq_end_io_fn *end_io; 191 void *end_io_data; 192 193 /* for bidi */ 194 struct request *next_rq; 195 }; 196 197 static inline unsigned short req_get_ioprio(struct request *req) 198 { 199 return req->ioprio; 200 } 201 202 /* 203 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME 204 * requests. Some step values could eventually be made generic. 205 */ 206 struct request_pm_state 207 { 208 /* PM state machine step value, currently driver specific */ 209 int pm_step; 210 /* requested PM state value (S1, S2, S3, S4, ...) */ 211 u32 pm_state; 212 void* data; /* for driver use */ 213 }; 214 215 #include <linux/elevator.h> 216 217 typedef void (request_fn_proc) (struct request_queue *q); 218 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); 219 typedef int (prep_rq_fn) (struct request_queue *, struct request *); 220 typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 221 222 struct bio_vec; 223 struct bvec_merge_data { 224 struct block_device *bi_bdev; 225 sector_t bi_sector; 226 unsigned bi_size; 227 unsigned long bi_rw; 228 }; 229 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, 230 struct bio_vec *); 231 typedef void (softirq_done_fn)(struct request *); 232 typedef int (dma_drain_needed_fn)(struct request *); 233 typedef int (lld_busy_fn) (struct request_queue *q); 234 typedef int (bsg_job_fn) (struct bsg_job *); 235 236 enum blk_eh_timer_return { 237 BLK_EH_NOT_HANDLED, 238 BLK_EH_HANDLED, 239 BLK_EH_RESET_TIMER, 240 }; 241 242 typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); 243 244 enum blk_queue_state { 245 Queue_down, 246 Queue_up, 247 }; 248 249 struct blk_queue_tag { 250 struct request **tag_index; /* map of busy tags */ 251 unsigned long *tag_map; /* bit map of free/busy tags */ 252 int busy; /* current depth */ 253 int max_depth; /* what we will send to device */ 254 int real_max_depth; /* what the array can hold */ 255 atomic_t refcnt; /* map can be shared */ 256 }; 257 258 #define BLK_SCSI_MAX_CMDS (256) 259 #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) 260 261 struct queue_limits { 262 unsigned long bounce_pfn; 263 unsigned long seg_boundary_mask; 264 265 unsigned int max_hw_sectors; 266 unsigned int max_sectors; 267 unsigned int max_segment_size; 268 unsigned int physical_block_size; 269 unsigned int alignment_offset; 270 unsigned int io_min; 271 unsigned int io_opt; 272 unsigned int max_discard_sectors; 273 unsigned int max_write_same_sectors; 274 unsigned int discard_granularity; 275 unsigned int discard_alignment; 276 277 unsigned short logical_block_size; 278 unsigned short max_segments; 279 unsigned short max_integrity_segments; 280 281 unsigned char misaligned; 282 unsigned char discard_misaligned; 283 unsigned char cluster; 284 unsigned char discard_zeroes_data; 285 }; 286 287 struct request_queue { 288 /* 289 * Together with queue_head for cacheline sharing 290 */ 291 struct list_head queue_head; 292 struct request *last_merge; 293 struct elevator_queue *elevator; 294 int nr_rqs[2]; /* # allocated [a]sync rqs */ 295 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 296 297 /* 298 * If blkcg is not used, @q->root_rl serves all requests. If blkcg 299 * is used, root blkg allocates from @q->root_rl and all other 300 * blkgs from their own blkg->rl. Which one to use should be 301 * determined using bio_request_list(). 302 */ 303 struct request_list root_rl; 304 305 request_fn_proc *request_fn; 306 make_request_fn *make_request_fn; 307 prep_rq_fn *prep_rq_fn; 308 unprep_rq_fn *unprep_rq_fn; 309 merge_bvec_fn *merge_bvec_fn; 310 softirq_done_fn *softirq_done_fn; 311 rq_timed_out_fn *rq_timed_out_fn; 312 dma_drain_needed_fn *dma_drain_needed; 313 lld_busy_fn *lld_busy_fn; 314 315 /* 316 * Dispatch queue sorting 317 */ 318 sector_t end_sector; 319 struct request *boundary_rq; 320 321 /* 322 * Delayed queue handling 323 */ 324 struct delayed_work delay_work; 325 326 struct backing_dev_info backing_dev_info; 327 328 /* 329 * The queue owner gets to use this for whatever they like. 330 * ll_rw_blk doesn't touch it. 331 */ 332 void *queuedata; 333 334 /* 335 * various queue flags, see QUEUE_* below 336 */ 337 unsigned long queue_flags; 338 339 /* 340 * ida allocated id for this queue. Used to index queues from 341 * ioctx. 342 */ 343 int id; 344 345 /* 346 * queue needs bounce pages for pages above this limit 347 */ 348 gfp_t bounce_gfp; 349 350 /* 351 * protects queue structures from reentrancy. ->__queue_lock should 352 * _never_ be used directly, it is queue private. always use 353 * ->queue_lock. 354 */ 355 spinlock_t __queue_lock; 356 spinlock_t *queue_lock; 357 358 /* 359 * queue kobject 360 */ 361 struct kobject kobj; 362 363 /* 364 * queue settings 365 */ 366 unsigned long nr_requests; /* Max # of requests */ 367 unsigned int nr_congestion_on; 368 unsigned int nr_congestion_off; 369 unsigned int nr_batching; 370 371 unsigned int dma_drain_size; 372 void *dma_drain_buffer; 373 unsigned int dma_pad_mask; 374 unsigned int dma_alignment; 375 376 struct blk_queue_tag *queue_tags; 377 struct list_head tag_busy_list; 378 379 unsigned int nr_sorted; 380 unsigned int in_flight[2]; 381 /* 382 * Number of active block driver functions for which blk_drain_queue() 383 * must wait. Must be incremented around functions that unlock the 384 * queue_lock internally, e.g. scsi_request_fn(). 385 */ 386 unsigned int request_fn_active; 387 388 unsigned int rq_timeout; 389 struct timer_list timeout; 390 struct list_head timeout_list; 391 392 struct list_head icq_list; 393 #ifdef CONFIG_BLK_CGROUP 394 DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); 395 struct blkcg_gq *root_blkg; 396 struct list_head blkg_list; 397 #endif 398 399 struct queue_limits limits; 400 401 /* 402 * sg stuff 403 */ 404 unsigned int sg_timeout; 405 unsigned int sg_reserved_size; 406 int node; 407 #ifdef CONFIG_BLK_DEV_IO_TRACE 408 struct blk_trace *blk_trace; 409 #endif 410 /* 411 * for flush operations 412 */ 413 unsigned int flush_flags; 414 unsigned int flush_not_queueable:1; 415 unsigned int flush_queue_delayed:1; 416 unsigned int flush_pending_idx:1; 417 unsigned int flush_running_idx:1; 418 unsigned long flush_pending_since; 419 struct list_head flush_queue[2]; 420 struct list_head flush_data_in_flight; 421 struct request flush_rq; 422 423 struct mutex sysfs_lock; 424 425 int bypass_depth; 426 427 #if defined(CONFIG_BLK_DEV_BSG) 428 bsg_job_fn *bsg_job_fn; 429 int bsg_job_size; 430 struct bsg_class_device bsg_dev; 431 #endif 432 433 #ifdef CONFIG_BLK_CGROUP 434 struct list_head all_q_node; 435 #endif 436 #ifdef CONFIG_BLK_DEV_THROTTLING 437 /* Throttle data */ 438 struct throtl_data *td; 439 #endif 440 }; 441 442 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 443 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 444 #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 445 #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 446 #define QUEUE_FLAG_DYING 5 /* queue being torn down */ 447 #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ 448 #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ 449 #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ 450 #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ 451 #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ 452 #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ 453 #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ 454 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 455 #define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ 456 #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ 457 #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ 458 #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ 459 #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ 460 #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ 461 #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 462 463 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 464 (1 << QUEUE_FLAG_STACKABLE) | \ 465 (1 << QUEUE_FLAG_SAME_COMP) | \ 466 (1 << QUEUE_FLAG_ADD_RANDOM)) 467 468 static inline void queue_lockdep_assert_held(struct request_queue *q) 469 { 470 if (q->queue_lock) 471 lockdep_assert_held(q->queue_lock); 472 } 473 474 static inline void queue_flag_set_unlocked(unsigned int flag, 475 struct request_queue *q) 476 { 477 __set_bit(flag, &q->queue_flags); 478 } 479 480 static inline int queue_flag_test_and_clear(unsigned int flag, 481 struct request_queue *q) 482 { 483 queue_lockdep_assert_held(q); 484 485 if (test_bit(flag, &q->queue_flags)) { 486 __clear_bit(flag, &q->queue_flags); 487 return 1; 488 } 489 490 return 0; 491 } 492 493 static inline int queue_flag_test_and_set(unsigned int flag, 494 struct request_queue *q) 495 { 496 queue_lockdep_assert_held(q); 497 498 if (!test_bit(flag, &q->queue_flags)) { 499 __set_bit(flag, &q->queue_flags); 500 return 0; 501 } 502 503 return 1; 504 } 505 506 static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 507 { 508 queue_lockdep_assert_held(q); 509 __set_bit(flag, &q->queue_flags); 510 } 511 512 static inline void queue_flag_clear_unlocked(unsigned int flag, 513 struct request_queue *q) 514 { 515 __clear_bit(flag, &q->queue_flags); 516 } 517 518 static inline int queue_in_flight(struct request_queue *q) 519 { 520 return q->in_flight[0] + q->in_flight[1]; 521 } 522 523 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 524 { 525 queue_lockdep_assert_held(q); 526 __clear_bit(flag, &q->queue_flags); 527 } 528 529 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 530 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 531 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) 532 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) 533 #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) 534 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 535 #define blk_queue_noxmerges(q) \ 536 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) 537 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 538 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 539 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 540 #define blk_queue_stackable(q) \ 541 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 542 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 543 #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ 544 test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) 545 546 #define blk_noretry_request(rq) \ 547 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 548 REQ_FAILFAST_DRIVER)) 549 550 #define blk_account_rq(rq) \ 551 (((rq)->cmd_flags & REQ_STARTED) && \ 552 ((rq)->cmd_type == REQ_TYPE_FS)) 553 554 #define blk_pm_request(rq) \ 555 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ 556 (rq)->cmd_type == REQ_TYPE_PM_RESUME) 557 558 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 559 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 560 /* rq->queuelist of dequeued request must be list_empty() */ 561 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) 562 563 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 564 565 #define rq_data_dir(rq) ((rq)->cmd_flags & 1) 566 567 static inline unsigned int blk_queue_cluster(struct request_queue *q) 568 { 569 return q->limits.cluster; 570 } 571 572 /* 573 * We regard a request as sync, if either a read or a sync write 574 */ 575 static inline bool rw_is_sync(unsigned int rw_flags) 576 { 577 return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); 578 } 579 580 static inline bool rq_is_sync(struct request *rq) 581 { 582 return rw_is_sync(rq->cmd_flags); 583 } 584 585 static inline bool blk_rl_full(struct request_list *rl, bool sync) 586 { 587 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 588 589 return rl->flags & flag; 590 } 591 592 static inline void blk_set_rl_full(struct request_list *rl, bool sync) 593 { 594 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 595 596 rl->flags |= flag; 597 } 598 599 static inline void blk_clear_rl_full(struct request_list *rl, bool sync) 600 { 601 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 602 603 rl->flags &= ~flag; 604 } 605 606 static inline bool rq_mergeable(struct request *rq) 607 { 608 if (rq->cmd_type != REQ_TYPE_FS) 609 return false; 610 611 if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 612 return false; 613 614 return true; 615 } 616 617 static inline bool blk_check_merge_flags(unsigned int flags1, 618 unsigned int flags2) 619 { 620 if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) 621 return false; 622 623 if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) 624 return false; 625 626 if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME)) 627 return false; 628 629 return true; 630 } 631 632 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) 633 { 634 if (bio_data(a) == bio_data(b)) 635 return true; 636 637 return false; 638 } 639 640 /* 641 * q->prep_rq_fn return values 642 */ 643 #define BLKPREP_OK 0 /* serve it */ 644 #define BLKPREP_KILL 1 /* fatal error, kill */ 645 #define BLKPREP_DEFER 2 /* leave on queue */ 646 647 extern unsigned long blk_max_low_pfn, blk_max_pfn; 648 649 /* 650 * standard bounce addresses: 651 * 652 * BLK_BOUNCE_HIGH : bounce all highmem pages 653 * BLK_BOUNCE_ANY : don't bounce anything 654 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary 655 */ 656 657 #if BITS_PER_LONG == 32 658 #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) 659 #else 660 #define BLK_BOUNCE_HIGH -1ULL 661 #endif 662 #define BLK_BOUNCE_ANY (-1ULL) 663 #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) 664 665 /* 666 * default timeout for SG_IO if none specified 667 */ 668 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) 669 #define BLK_MIN_SG_TIMEOUT (7 * HZ) 670 671 #ifdef CONFIG_BOUNCE 672 extern int init_emergency_isa_pool(void); 673 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); 674 #else 675 static inline int init_emergency_isa_pool(void) 676 { 677 return 0; 678 } 679 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) 680 { 681 } 682 #endif /* CONFIG_MMU */ 683 684 struct rq_map_data { 685 struct page **pages; 686 int page_order; 687 int nr_entries; 688 unsigned long offset; 689 int null_mapped; 690 int from_user; 691 }; 692 693 struct req_iterator { 694 int i; 695 struct bio *bio; 696 }; 697 698 /* This should not be used directly - use rq_for_each_segment */ 699 #define for_each_bio(_bio) \ 700 for (; _bio; _bio = _bio->bi_next) 701 #define __rq_for_each_bio(_bio, rq) \ 702 if ((rq->bio)) \ 703 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) 704 705 #define rq_for_each_segment(bvl, _rq, _iter) \ 706 __rq_for_each_bio(_iter.bio, _rq) \ 707 bio_for_each_segment(bvl, _iter.bio, _iter.i) 708 709 #define rq_iter_last(rq, _iter) \ 710 (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) 711 712 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 713 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 714 #endif 715 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 716 extern void rq_flush_dcache_pages(struct request *rq); 717 #else 718 static inline void rq_flush_dcache_pages(struct request *rq) 719 { 720 } 721 #endif 722 723 extern int blk_register_queue(struct gendisk *disk); 724 extern void blk_unregister_queue(struct gendisk *disk); 725 extern void generic_make_request(struct bio *bio); 726 extern void blk_rq_init(struct request_queue *q, struct request *rq); 727 extern void blk_put_request(struct request *); 728 extern void __blk_put_request(struct request_queue *, struct request *); 729 extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 730 extern struct request *blk_make_request(struct request_queue *, struct bio *, 731 gfp_t); 732 extern void blk_requeue_request(struct request_queue *, struct request *); 733 extern void blk_add_request_payload(struct request *rq, struct page *page, 734 unsigned int len); 735 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 736 extern int blk_lld_busy(struct request_queue *q); 737 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 738 struct bio_set *bs, gfp_t gfp_mask, 739 int (*bio_ctr)(struct bio *, struct bio *, void *), 740 void *data); 741 extern void blk_rq_unprep_clone(struct request *rq); 742 extern int blk_insert_cloned_request(struct request_queue *q, 743 struct request *rq); 744 extern void blk_delay_queue(struct request_queue *, unsigned long); 745 extern void blk_recount_segments(struct request_queue *, struct bio *); 746 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); 747 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, 748 unsigned int, void __user *); 749 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, 750 unsigned int, void __user *); 751 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 752 struct scsi_ioctl_command __user *); 753 754 extern void blk_queue_bio(struct request_queue *q, struct bio *bio); 755 756 /* 757 * A queue has just exitted congestion. Note this in the global counter of 758 * congested queues, and wake up anyone who was waiting for requests to be 759 * put back. 760 */ 761 static inline void blk_clear_queue_congested(struct request_queue *q, int sync) 762 { 763 clear_bdi_congested(&q->backing_dev_info, sync); 764 } 765 766 /* 767 * A queue has just entered congestion. Flag that in the queue's VM-visible 768 * state flags and increment the global gounter of congested queues. 769 */ 770 static inline void blk_set_queue_congested(struct request_queue *q, int sync) 771 { 772 set_bdi_congested(&q->backing_dev_info, sync); 773 } 774 775 extern void blk_start_queue(struct request_queue *q); 776 extern void blk_stop_queue(struct request_queue *q); 777 extern void blk_sync_queue(struct request_queue *q); 778 extern void __blk_stop_queue(struct request_queue *q); 779 extern void __blk_run_queue(struct request_queue *q); 780 extern void blk_run_queue(struct request_queue *); 781 extern void blk_run_queue_async(struct request_queue *q); 782 extern int blk_rq_map_user(struct request_queue *, struct request *, 783 struct rq_map_data *, void __user *, unsigned long, 784 gfp_t); 785 extern int blk_rq_unmap_user(struct bio *); 786 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 787 extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 788 struct rq_map_data *, struct sg_iovec *, int, 789 unsigned int, gfp_t); 790 extern int blk_execute_rq(struct request_queue *, struct gendisk *, 791 struct request *, int); 792 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 793 struct request *, int, rq_end_io_fn *); 794 795 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 796 { 797 return bdev->bd_disk->queue; 798 } 799 800 /* 801 * blk_rq_pos() : the current sector 802 * blk_rq_bytes() : bytes left in the entire request 803 * blk_rq_cur_bytes() : bytes left in the current segment 804 * blk_rq_err_bytes() : bytes left till the next error boundary 805 * blk_rq_sectors() : sectors left in the entire request 806 * blk_rq_cur_sectors() : sectors left in the current segment 807 */ 808 static inline sector_t blk_rq_pos(const struct request *rq) 809 { 810 return rq->__sector; 811 } 812 813 static inline unsigned int blk_rq_bytes(const struct request *rq) 814 { 815 return rq->__data_len; 816 } 817 818 static inline int blk_rq_cur_bytes(const struct request *rq) 819 { 820 return rq->bio ? bio_cur_bytes(rq->bio) : 0; 821 } 822 823 extern unsigned int blk_rq_err_bytes(const struct request *rq); 824 825 static inline unsigned int blk_rq_sectors(const struct request *rq) 826 { 827 return blk_rq_bytes(rq) >> 9; 828 } 829 830 static inline unsigned int blk_rq_cur_sectors(const struct request *rq) 831 { 832 return blk_rq_cur_bytes(rq) >> 9; 833 } 834 835 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 836 unsigned int cmd_flags) 837 { 838 if (unlikely(cmd_flags & REQ_DISCARD)) 839 return q->limits.max_discard_sectors; 840 841 if (unlikely(cmd_flags & REQ_WRITE_SAME)) 842 return q->limits.max_write_same_sectors; 843 844 return q->limits.max_sectors; 845 } 846 847 static inline unsigned int blk_rq_get_max_sectors(struct request *rq) 848 { 849 struct request_queue *q = rq->q; 850 851 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) 852 return q->limits.max_hw_sectors; 853 854 return blk_queue_get_max_sectors(q, rq->cmd_flags); 855 } 856 857 /* 858 * Request issue related functions. 859 */ 860 extern struct request *blk_peek_request(struct request_queue *q); 861 extern void blk_start_request(struct request *rq); 862 extern struct request *blk_fetch_request(struct request_queue *q); 863 864 /* 865 * Request completion related functions. 866 * 867 * blk_update_request() completes given number of bytes and updates 868 * the request without completing it. 869 * 870 * blk_end_request() and friends. __blk_end_request() must be called 871 * with the request queue spinlock acquired. 872 * 873 * Several drivers define their own end_request and call 874 * blk_end_request() for parts of the original function. 875 * This prevents code duplication in drivers. 876 */ 877 extern bool blk_update_request(struct request *rq, int error, 878 unsigned int nr_bytes); 879 extern bool blk_end_request(struct request *rq, int error, 880 unsigned int nr_bytes); 881 extern void blk_end_request_all(struct request *rq, int error); 882 extern bool blk_end_request_cur(struct request *rq, int error); 883 extern bool blk_end_request_err(struct request *rq, int error); 884 extern bool __blk_end_request(struct request *rq, int error, 885 unsigned int nr_bytes); 886 extern void __blk_end_request_all(struct request *rq, int error); 887 extern bool __blk_end_request_cur(struct request *rq, int error); 888 extern bool __blk_end_request_err(struct request *rq, int error); 889 890 extern void blk_complete_request(struct request *); 891 extern void __blk_complete_request(struct request *); 892 extern void blk_abort_request(struct request *); 893 extern void blk_unprep_request(struct request *); 894 895 /* 896 * Access functions for manipulating queue properties 897 */ 898 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 899 spinlock_t *lock, int node_id); 900 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 901 extern struct request_queue *blk_init_allocated_queue(struct request_queue *, 902 request_fn_proc *, spinlock_t *); 903 extern void blk_cleanup_queue(struct request_queue *); 904 extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 905 extern void blk_queue_bounce_limit(struct request_queue *, u64); 906 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); 907 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 908 extern void blk_queue_max_segments(struct request_queue *, unsigned short); 909 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 910 extern void blk_queue_max_discard_sectors(struct request_queue *q, 911 unsigned int max_discard_sectors); 912 extern void blk_queue_max_write_same_sectors(struct request_queue *q, 913 unsigned int max_write_same_sectors); 914 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 915 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); 916 extern void blk_queue_alignment_offset(struct request_queue *q, 917 unsigned int alignment); 918 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); 919 extern void blk_queue_io_min(struct request_queue *q, unsigned int min); 920 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); 921 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); 922 extern void blk_set_default_limits(struct queue_limits *lim); 923 extern void blk_set_stacking_limits(struct queue_limits *lim); 924 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 925 sector_t offset); 926 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, 927 sector_t offset); 928 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 929 sector_t offset); 930 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 931 extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 932 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); 933 extern int blk_queue_dma_drain(struct request_queue *q, 934 dma_drain_needed_fn *dma_drain_needed, 935 void *buf, unsigned int size); 936 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 937 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 938 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 939 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); 940 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 941 extern void blk_queue_dma_alignment(struct request_queue *, int); 942 extern void blk_queue_update_dma_alignment(struct request_queue *, int); 943 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 944 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 945 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 946 extern void blk_queue_flush(struct request_queue *q, unsigned int flush); 947 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); 948 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 949 950 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 951 extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio, 952 struct scatterlist *sglist); 953 extern void blk_dump_rq_flags(struct request *, char *); 954 extern long nr_blockdev_pages(void); 955 956 bool __must_check blk_get_queue(struct request_queue *); 957 struct request_queue *blk_alloc_queue(gfp_t); 958 struct request_queue *blk_alloc_queue_node(gfp_t, int); 959 extern void blk_put_queue(struct request_queue *); 960 961 /* 962 * blk_plug permits building a queue of related requests by holding the I/O 963 * fragments for a short period. This allows merging of sequential requests 964 * into single larger request. As the requests are moved from a per-task list to 965 * the device's request_queue in a batch, this results in improved scalability 966 * as the lock contention for request_queue lock is reduced. 967 * 968 * It is ok not to disable preemption when adding the request to the plug list 969 * or when attempting a merge, because blk_schedule_flush_list() will only flush 970 * the plug list when the task sleeps by itself. For details, please see 971 * schedule() where blk_schedule_flush_plug() is called. 972 */ 973 struct blk_plug { 974 unsigned long magic; /* detect uninitialized use-cases */ 975 struct list_head list; /* requests */ 976 struct list_head cb_list; /* md requires an unplug callback */ 977 unsigned int should_sort; /* list to be sorted before flushing? */ 978 }; 979 #define BLK_MAX_REQUEST_COUNT 16 980 981 struct blk_plug_cb; 982 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); 983 struct blk_plug_cb { 984 struct list_head list; 985 blk_plug_cb_fn callback; 986 void *data; 987 }; 988 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, 989 void *data, int size); 990 extern void blk_start_plug(struct blk_plug *); 991 extern void blk_finish_plug(struct blk_plug *); 992 extern void blk_flush_plug_list(struct blk_plug *, bool); 993 994 static inline void blk_flush_plug(struct task_struct *tsk) 995 { 996 struct blk_plug *plug = tsk->plug; 997 998 if (plug) 999 blk_flush_plug_list(plug, false); 1000 } 1001 1002 static inline void blk_schedule_flush_plug(struct task_struct *tsk) 1003 { 1004 struct blk_plug *plug = tsk->plug; 1005 1006 if (plug) 1007 blk_flush_plug_list(plug, true); 1008 } 1009 1010 static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1011 { 1012 struct blk_plug *plug = tsk->plug; 1013 1014 return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); 1015 } 1016 1017 /* 1018 * tag stuff 1019 */ 1020 #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) 1021 extern int blk_queue_start_tag(struct request_queue *, struct request *); 1022 extern struct request *blk_queue_find_tag(struct request_queue *, int); 1023 extern void blk_queue_end_tag(struct request_queue *, struct request *); 1024 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); 1025 extern void blk_queue_free_tags(struct request_queue *); 1026 extern int blk_queue_resize_tags(struct request_queue *, int); 1027 extern void blk_queue_invalidate_tags(struct request_queue *); 1028 extern struct blk_queue_tag *blk_init_tags(int); 1029 extern void blk_free_tags(struct blk_queue_tag *); 1030 1031 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, 1032 int tag) 1033 { 1034 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) 1035 return NULL; 1036 return bqt->tag_index[tag]; 1037 } 1038 1039 #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ 1040 1041 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); 1042 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, 1043 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 1044 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, 1045 sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1046 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1047 sector_t nr_sects, gfp_t gfp_mask); 1048 static inline int sb_issue_discard(struct super_block *sb, sector_t block, 1049 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) 1050 { 1051 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), 1052 nr_blocks << (sb->s_blocksize_bits - 9), 1053 gfp_mask, flags); 1054 } 1055 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, 1056 sector_t nr_blocks, gfp_t gfp_mask) 1057 { 1058 return blkdev_issue_zeroout(sb->s_bdev, 1059 block << (sb->s_blocksize_bits - 9), 1060 nr_blocks << (sb->s_blocksize_bits - 9), 1061 gfp_mask); 1062 } 1063 1064 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 1065 1066 enum blk_default_limits { 1067 BLK_MAX_SEGMENTS = 128, 1068 BLK_SAFE_MAX_SECTORS = 255, 1069 BLK_DEF_MAX_SECTORS = 1024, 1070 BLK_MAX_SEGMENT_SIZE = 65536, 1071 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1072 }; 1073 1074 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 1075 1076 static inline unsigned long queue_bounce_pfn(struct request_queue *q) 1077 { 1078 return q->limits.bounce_pfn; 1079 } 1080 1081 static inline unsigned long queue_segment_boundary(struct request_queue *q) 1082 { 1083 return q->limits.seg_boundary_mask; 1084 } 1085 1086 static inline unsigned int queue_max_sectors(struct request_queue *q) 1087 { 1088 return q->limits.max_sectors; 1089 } 1090 1091 static inline unsigned int queue_max_hw_sectors(struct request_queue *q) 1092 { 1093 return q->limits.max_hw_sectors; 1094 } 1095 1096 static inline unsigned short queue_max_segments(struct request_queue *q) 1097 { 1098 return q->limits.max_segments; 1099 } 1100 1101 static inline unsigned int queue_max_segment_size(struct request_queue *q) 1102 { 1103 return q->limits.max_segment_size; 1104 } 1105 1106 static inline unsigned short queue_logical_block_size(struct request_queue *q) 1107 { 1108 int retval = 512; 1109 1110 if (q && q->limits.logical_block_size) 1111 retval = q->limits.logical_block_size; 1112 1113 return retval; 1114 } 1115 1116 static inline unsigned short bdev_logical_block_size(struct block_device *bdev) 1117 { 1118 return queue_logical_block_size(bdev_get_queue(bdev)); 1119 } 1120 1121 static inline unsigned int queue_physical_block_size(struct request_queue *q) 1122 { 1123 return q->limits.physical_block_size; 1124 } 1125 1126 static inline unsigned int bdev_physical_block_size(struct block_device *bdev) 1127 { 1128 return queue_physical_block_size(bdev_get_queue(bdev)); 1129 } 1130 1131 static inline unsigned int queue_io_min(struct request_queue *q) 1132 { 1133 return q->limits.io_min; 1134 } 1135 1136 static inline int bdev_io_min(struct block_device *bdev) 1137 { 1138 return queue_io_min(bdev_get_queue(bdev)); 1139 } 1140 1141 static inline unsigned int queue_io_opt(struct request_queue *q) 1142 { 1143 return q->limits.io_opt; 1144 } 1145 1146 static inline int bdev_io_opt(struct block_device *bdev) 1147 { 1148 return queue_io_opt(bdev_get_queue(bdev)); 1149 } 1150 1151 static inline int queue_alignment_offset(struct request_queue *q) 1152 { 1153 if (q->limits.misaligned) 1154 return -1; 1155 1156 return q->limits.alignment_offset; 1157 } 1158 1159 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) 1160 { 1161 unsigned int granularity = max(lim->physical_block_size, lim->io_min); 1162 unsigned int alignment = (sector << 9) & (granularity - 1); 1163 1164 return (granularity + lim->alignment_offset - alignment) 1165 & (granularity - 1); 1166 } 1167 1168 static inline int bdev_alignment_offset(struct block_device *bdev) 1169 { 1170 struct request_queue *q = bdev_get_queue(bdev); 1171 1172 if (q->limits.misaligned) 1173 return -1; 1174 1175 if (bdev != bdev->bd_contains) 1176 return bdev->bd_part->alignment_offset; 1177 1178 return q->limits.alignment_offset; 1179 } 1180 1181 static inline int queue_discard_alignment(struct request_queue *q) 1182 { 1183 if (q->limits.discard_misaligned) 1184 return -1; 1185 1186 return q->limits.discard_alignment; 1187 } 1188 1189 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 1190 { 1191 unsigned int alignment, granularity, offset; 1192 1193 if (!lim->max_discard_sectors) 1194 return 0; 1195 1196 /* Why are these in bytes, not sectors? */ 1197 alignment = lim->discard_alignment >> 9; 1198 granularity = lim->discard_granularity >> 9; 1199 if (!granularity) 1200 return 0; 1201 1202 /* Offset of the partition start in 'granularity' sectors */ 1203 offset = sector_div(sector, granularity); 1204 1205 /* And why do we do this modulus *again* in blkdev_issue_discard()? */ 1206 offset = (granularity + alignment - offset) % granularity; 1207 1208 /* Turn it back into bytes, gaah */ 1209 return offset << 9; 1210 } 1211 1212 static inline int bdev_discard_alignment(struct block_device *bdev) 1213 { 1214 struct request_queue *q = bdev_get_queue(bdev); 1215 1216 if (bdev != bdev->bd_contains) 1217 return bdev->bd_part->discard_alignment; 1218 1219 return q->limits.discard_alignment; 1220 } 1221 1222 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 1223 { 1224 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) 1225 return 1; 1226 1227 return 0; 1228 } 1229 1230 static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) 1231 { 1232 return queue_discard_zeroes_data(bdev_get_queue(bdev)); 1233 } 1234 1235 static inline unsigned int bdev_write_same(struct block_device *bdev) 1236 { 1237 struct request_queue *q = bdev_get_queue(bdev); 1238 1239 if (q) 1240 return q->limits.max_write_same_sectors; 1241 1242 return 0; 1243 } 1244 1245 static inline int queue_dma_alignment(struct request_queue *q) 1246 { 1247 return q ? q->dma_alignment : 511; 1248 } 1249 1250 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, 1251 unsigned int len) 1252 { 1253 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; 1254 return !(addr & alignment) && !(len & alignment); 1255 } 1256 1257 /* assumes size > 256 */ 1258 static inline unsigned int blksize_bits(unsigned int size) 1259 { 1260 unsigned int bits = 8; 1261 do { 1262 bits++; 1263 size >>= 1; 1264 } while (size > 256); 1265 return bits; 1266 } 1267 1268 static inline unsigned int block_size(struct block_device *bdev) 1269 { 1270 return bdev->bd_block_size; 1271 } 1272 1273 static inline bool queue_flush_queueable(struct request_queue *q) 1274 { 1275 return !q->flush_not_queueable; 1276 } 1277 1278 typedef struct {struct page *v;} Sector; 1279 1280 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 1281 1282 static inline void put_dev_sector(Sector p) 1283 { 1284 page_cache_release(p.v); 1285 } 1286 1287 struct work_struct; 1288 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1289 1290 #ifdef CONFIG_BLK_CGROUP 1291 /* 1292 * This should not be using sched_clock(). A real patch is in progress 1293 * to fix this up, until that is in place we need to disable preemption 1294 * around sched_clock() in this function and set_io_start_time_ns(). 1295 */ 1296 static inline void set_start_time_ns(struct request *req) 1297 { 1298 preempt_disable(); 1299 req->start_time_ns = sched_clock(); 1300 preempt_enable(); 1301 } 1302 1303 static inline void set_io_start_time_ns(struct request *req) 1304 { 1305 preempt_disable(); 1306 req->io_start_time_ns = sched_clock(); 1307 preempt_enable(); 1308 } 1309 1310 static inline uint64_t rq_start_time_ns(struct request *req) 1311 { 1312 return req->start_time_ns; 1313 } 1314 1315 static inline uint64_t rq_io_start_time_ns(struct request *req) 1316 { 1317 return req->io_start_time_ns; 1318 } 1319 #else 1320 static inline void set_start_time_ns(struct request *req) {} 1321 static inline void set_io_start_time_ns(struct request *req) {} 1322 static inline uint64_t rq_start_time_ns(struct request *req) 1323 { 1324 return 0; 1325 } 1326 static inline uint64_t rq_io_start_time_ns(struct request *req) 1327 { 1328 return 0; 1329 } 1330 #endif 1331 1332 #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1333 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 1334 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 1335 MODULE_ALIAS("block-major-" __stringify(major) "-*") 1336 1337 #if defined(CONFIG_BLK_DEV_INTEGRITY) 1338 1339 #define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ 1340 #define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ 1341 1342 struct blk_integrity_exchg { 1343 void *prot_buf; 1344 void *data_buf; 1345 sector_t sector; 1346 unsigned int data_size; 1347 unsigned short sector_size; 1348 const char *disk_name; 1349 }; 1350 1351 typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); 1352 typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); 1353 typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); 1354 typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); 1355 1356 struct blk_integrity { 1357 integrity_gen_fn *generate_fn; 1358 integrity_vrfy_fn *verify_fn; 1359 integrity_set_tag_fn *set_tag_fn; 1360 integrity_get_tag_fn *get_tag_fn; 1361 1362 unsigned short flags; 1363 unsigned short tuple_size; 1364 unsigned short sector_size; 1365 unsigned short tag_size; 1366 1367 const char *name; 1368 1369 struct kobject kobj; 1370 }; 1371 1372 extern bool blk_integrity_is_initialized(struct gendisk *); 1373 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 1374 extern void blk_integrity_unregister(struct gendisk *); 1375 extern int blk_integrity_compare(struct gendisk *, struct gendisk *); 1376 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, 1377 struct scatterlist *); 1378 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); 1379 extern int blk_integrity_merge_rq(struct request_queue *, struct request *, 1380 struct request *); 1381 extern int blk_integrity_merge_bio(struct request_queue *, struct request *, 1382 struct bio *); 1383 1384 static inline 1385 struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 1386 { 1387 return bdev->bd_disk->integrity; 1388 } 1389 1390 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1391 { 1392 return disk->integrity; 1393 } 1394 1395 static inline int blk_integrity_rq(struct request *rq) 1396 { 1397 if (rq->bio == NULL) 1398 return 0; 1399 1400 return bio_integrity(rq->bio); 1401 } 1402 1403 static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1404 unsigned int segs) 1405 { 1406 q->limits.max_integrity_segments = segs; 1407 } 1408 1409 static inline unsigned short 1410 queue_max_integrity_segments(struct request_queue *q) 1411 { 1412 return q->limits.max_integrity_segments; 1413 } 1414 1415 #else /* CONFIG_BLK_DEV_INTEGRITY */ 1416 1417 struct bio; 1418 struct block_device; 1419 struct gendisk; 1420 struct blk_integrity; 1421 1422 static inline int blk_integrity_rq(struct request *rq) 1423 { 1424 return 0; 1425 } 1426 static inline int blk_rq_count_integrity_sg(struct request_queue *q, 1427 struct bio *b) 1428 { 1429 return 0; 1430 } 1431 static inline int blk_rq_map_integrity_sg(struct request_queue *q, 1432 struct bio *b, 1433 struct scatterlist *s) 1434 { 1435 return 0; 1436 } 1437 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) 1438 { 1439 return 0; 1440 } 1441 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1442 { 1443 return NULL; 1444 } 1445 static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) 1446 { 1447 return 0; 1448 } 1449 static inline int blk_integrity_register(struct gendisk *d, 1450 struct blk_integrity *b) 1451 { 1452 return 0; 1453 } 1454 static inline void blk_integrity_unregister(struct gendisk *d) 1455 { 1456 } 1457 static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1458 unsigned int segs) 1459 { 1460 } 1461 static inline unsigned short queue_max_integrity_segments(struct request_queue *q) 1462 { 1463 return 0; 1464 } 1465 static inline int blk_integrity_merge_rq(struct request_queue *rq, 1466 struct request *r1, 1467 struct request *r2) 1468 { 1469 return 0; 1470 } 1471 static inline int blk_integrity_merge_bio(struct request_queue *rq, 1472 struct request *r, 1473 struct bio *b) 1474 { 1475 return 0; 1476 } 1477 static inline bool blk_integrity_is_initialized(struct gendisk *g) 1478 { 1479 return 0; 1480 } 1481 1482 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1483 1484 struct block_device_operations { 1485 int (*open) (struct block_device *, fmode_t); 1486 int (*release) (struct gendisk *, fmode_t); 1487 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1488 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1489 int (*direct_access) (struct block_device *, sector_t, 1490 void **, unsigned long *); 1491 unsigned int (*check_events) (struct gendisk *disk, 1492 unsigned int clearing); 1493 /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 1494 int (*media_changed) (struct gendisk *); 1495 void (*unlock_native_capacity) (struct gendisk *); 1496 int (*revalidate_disk) (struct gendisk *); 1497 int (*getgeo)(struct block_device *, struct hd_geometry *); 1498 /* this callback is with swap_lock and sometimes page table lock held */ 1499 void (*swap_slot_free_notify) (struct block_device *, unsigned long); 1500 struct module *owner; 1501 }; 1502 1503 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, 1504 unsigned long); 1505 #else /* CONFIG_BLOCK */ 1506 /* 1507 * stubs for when the block layer is configured out 1508 */ 1509 #define buffer_heads_over_limit 0 1510 1511 static inline long nr_blockdev_pages(void) 1512 { 1513 return 0; 1514 } 1515 1516 struct blk_plug { 1517 }; 1518 1519 static inline void blk_start_plug(struct blk_plug *plug) 1520 { 1521 } 1522 1523 static inline void blk_finish_plug(struct blk_plug *plug) 1524 { 1525 } 1526 1527 static inline void blk_flush_plug(struct task_struct *task) 1528 { 1529 } 1530 1531 static inline void blk_schedule_flush_plug(struct task_struct *task) 1532 { 1533 } 1534 1535 1536 static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1537 { 1538 return false; 1539 } 1540 1541 #endif /* CONFIG_BLOCK */ 1542 1543 #endif 1544