#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

	int			bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct list_head	all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
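
/*
 * Illustrative sketch only (not part of this header's API contract): the
 * queue_flag_set()/queue_flag_clear() helpers above assert that
 * ->queue_lock is held, so a caller toggling a flag outside the normal
 * block-layer paths would typically do:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The *_unlocked variants skip the assertion for contexts where the lock
 * is not needed, e.g. while the queue is still being set up.
 */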

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
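
/*
 * Illustrative sketch only: walking the data segments of a request rq with
 * rq_for_each_segment(). In this kernel the iteration variable is a
 * struct bio_vec pointer; "buf" is a hypothetical driver buffer and the
 * pages are assumed to be lowmem (otherwise a kmap would be needed).
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *	char *buf = my_buffer;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		memcpy(buf, page_address(bvec->bv_page) + bvec->bv_offset,
 *		       bvec->bv_len);
 *		buf += bvec->bv_len;
 *	}
 */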

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
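
/*
 * Illustrative sketch only: allocating a request, filling in a SCSI CDB and
 * executing it synchronously with blk_execute_rq() (declared below).
 * "cdb", "cdb_len" and "err" are hypothetical caller-provided data, and
 * error handling is abbreviated.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd_len = cdb_len;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	err = rq->errors ? -EIO : 0;
 *	blk_put_request(rq);
 */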

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_err_bytes()	: bytes left till the next error boundary
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends. __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
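
/*
 * Illustrative sketch only: the canonical consumption loop of a request_fn
 * based driver. ->request_fn is entered with q->queue_lock held, which is
 * why the lock-held __blk_end_request_all() variant is used here;
 * "my_request_fn" and "my_do_io" are hypothetical driver functions.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int error = my_do_io(rq);
 *			__blk_end_request_all(rq, error);
 *		}
 *	}
 */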
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
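
/*
 * Illustrative sketch only: how a simple request_fn based driver might set
 * up its queue with the property helpers above. "my_request_fn" and
 * "my_lock" are hypothetical, and the limit values are just examples.
 *
 *	struct request_queue *q;
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 *	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */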

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
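/*
 * Illustrative sketch only: the usual plugging pattern around a batch of
 * submissions. "bios", "nr_bios" and "i" are hypothetical; bio preparation
 * is elided.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		generic_make_request(bios[i]);
 *	blk_finish_plug(&plug);
 */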
struct blk_plug {
	unsigned long magic;		/* detect uninitialized use-cases */
	struct list_head list;		/* requests */
	struct list_head cb_list;	/* md requires an unplug callback */
	unsigned int should_sort;	/* list to be sorted before flushing? */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb {
	struct list_head list;
	void (*callback)(struct blk_plug_cb *);
};

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
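
/*
 * Illustrative sketch only: a driver using generic tag queueing. These
 * helpers expect q->queue_lock to be held; the depth of 64 is just an
 * example value.
 *
 *	blk_queue_init_tags(q, 64, NULL);		(at setup time)
 *
 *	rq = blk_peek_request(q);			(in ->request_fn)
 *	if (blk_queue_start_tag(q, rq))
 *		return;					(out of tags, retry later)
 *	(issue rq to hardware using rq->tag)
 *
 *	blk_queue_end_tag(q, rq);			(on completion, lock held)
 */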

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}
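
/*
 * Worked example (illustrative only): with the default queue_dma_alignment()
 * of 511 and a zero dma_pad_mask, the combined mask used by blk_rq_aligned()
 * below is 0x1ff. A buffer at kernel address ...1200 with a length of 4096
 * passes, while an address of ...1201 (or a length of 1000) fails, which
 * makes the mapping helpers fall back to copying instead of mapping the
 * buffer directly.
 */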

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};
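
/*
 * Illustrative sketch only: registering an integrity profile for a disk.
 * The profile name and callbacks ("my_generate"/"my_verify") are
 * hypothetical; blk_integrity_register() copies the template.
 *
 *	static struct blk_integrity my_integrity = {
 *		.name		= "MY-DIF-TYPE1",
 *		.generate_fn	= my_generate,
 *		.verify_fn	= my_verify,
 *		.tuple_size	= 8,
 *		.tag_size	= 0,
 *	};
 *
 *	blk_integrity_register(disk, &my_integrity);
 */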

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
					 struct request *r1,
					 struct request *r2)
{
	return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
					  struct request *r,
					  struct bio *b)
{
	return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}


static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif