#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_SENSE,		/* sense request */
	REQ_TYPE_PM_SUSPEND,	/* suspend request */
	REQ_TYPE_PM_RESUME,	/* resume request */
	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
	REQ_TYPE_SPECIAL,	/* driver defined type */
	/*
	 * for ATA/ATAPI devices. This really doesn't belong here; IDE should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is.
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 * If you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the three elevator_private pointers.
	 */
	union {
		void *elevator_private[3];
		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
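
/*
 * Illustrative sketch (not part of this header): a driver that bypasses the
 * request queue supplies a make_request_fn with the signature above and
 * completes each bio itself. The function name my_make_request and its body
 * are hypothetical:
 *
 *	static int my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		// transfer the data described by bio here
 *		bio_endio(bio, 0);	// complete the bio with no error
 *		return 0;		// 0 = handled, do not remap/resubmit
 *	}
 *
 * Such a hook is installed with blk_queue_make_request(), declared below.
 */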
enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char discard_zeroes_data;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	merge_bvec_fn *merge_bvec_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work delay_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	struct queue_limits limits;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int flush_flags;
	unsigned int flush_not_queueable:1;
	unsigned int flush_queue_delayed:1;
	unsigned int flush_pending_idx:1;
	unsigned int flush_running_idx:1;
	unsigned long flush_pending_since;
	struct list_head flush_queue[2];
	struct list_head flush_data_in_flight;
	struct request flush_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	int bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
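
/*
 * Illustrative sketch (not part of this header): the locked queue_flag
 * helpers above expect ->queue_lock to be held, so a caller would typically
 * wrap them like this (variable names are hypothetical):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants skip the WARN_ON_ONCE(!queue_is_locked(q)) check
 * and are meant for paths where the queue is not yet visible to anyone else.
 */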
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
 * may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
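
/*
 * Illustrative sketch (not part of this header): a prep_rq_fn decides, just
 * before dispatch, whether a request can be started. The function name
 * my_prep_rq and the resource check are hypothetical:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_resources_available(q))
 *			return BLKPREP_DEFER;	// retry later, stays queued
 *		if (rq->cmd_type != REQ_TYPE_FS)
 *			return BLKPREP_KILL;	// fatal, completed with error
 *		return BLKPREP_OK;		// ready to be dispatched
 *	}
 *
 * A driver installs such a hook with blk_queue_prep_rq(), declared below.
 */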
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
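
/*
 * Illustrative sketch (not part of this header): walking every bio_vec of a
 * request with rq_for_each_segment(). The transfer step is hypothetical:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *kaddr = kmap_atomic(bvec->bv_page);
 *
 *		// transfer bvec->bv_len bytes at kaddr + bvec->bv_offset
 *		kunmap_atomic(kaddr);
 *	}
 */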
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
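
/*
 * Illustrative sketch (not part of this header): issuing a SCSI command
 * synchronously through the block layer with blk_get_request() and
 * blk_execute_rq(). Error handling is elided; q and disk identify the
 * target device, and 0x00 is the standard TEST UNIT READY opcode:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = 0x00;		// TEST UNIT READY
 *	rq->cmd_len = 6;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	blk_execute_rq(q, disk, rq, 0);	// waits for completion
 *	blk_put_request(rq);
 */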
/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);
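
/*
 * Illustrative sketch (not part of this header): a minimal request_fn that
 * drains the queue with blk_fetch_request() and completes each request in
 * line. request_fn is entered with ->queue_lock held, so the lock is
 * dropped around the actual transfer; my_transfer() is hypothetical:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int err;
 *
 *			spin_unlock_irq(q->queue_lock);
 *			err = my_transfer(rq);	// move blk_rq_bytes(rq) bytes
 *						// starting at blk_rq_pos(rq)
 *			spin_lock_irq(q->queue_lock);
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 */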
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
							   request_fn_proc *,
							   spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
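
/*
 * Illustrative sketch (not part of this header): typical queue setup in a
 * request-based driver's probe path. my_request_fn, my_prep_rq, my_lock,
 * disk and the limit values are hypothetical:
 *
 *	struct request_queue *q = blk_init_queue(my_request_fn, &my_lock);
 *
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 255);
 *	blk_queue_prep_rq(q, my_prep_rq);
 *	disk->queue = q;		// before add_disk(disk)
 *
 * blk_cleanup_queue() undoes this on teardown.
 */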
/*
 * Note: code that modifies the blk_plug list/cb_list or an element of those
 * lists may be preempted, but it must not sleep (or must be very careful),
 * otherwise data is corrupted. For details, see schedule(), where
 * blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	unsigned long magic;
	struct list_head list;
	struct list_head cb_list;
	unsigned int should_sort;
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb {
	struct list_head list;
	void (*callback)(struct blk_plug_cb *);
};

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
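
/*
 * Illustrative sketch (not part of this header): batching submissions under
 * an on-stack plug, so requests can be queued and merged before being sent
 * to the driver when the plug is finished (or when the task schedules):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	// submit a batch of bios here, e.g. via submit_bio()
 *	blk_finish_plug(&plug);
 */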
/*
 * tag stuff
 */
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}
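
/*
 * Worked example (illustrative): with a 4096-byte physical block size, an
 * io_min of 4096 and an alignment_offset of 0, a request starting at
 * sector 7 gives alignment = (7 << 9) & 4095 = 3584, so the helper above
 * returns (4096 + 0 - 3584) & 4095 = 512: the start is 512 bytes short of
 * the next aligned boundary. A request starting at sector 8 returns 0,
 * i.e. it is properly aligned.
 */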
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
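
/*
 * Worked example (illustrative): blksize_bits(4096) shifts 4096 -> 2048 ->
 * 1024 -> 512 -> 256, incrementing bits from 8 to 12, and returns 12
 * (i.e. 1 << 12 == 4096). Sizes are assumed to be powers of two above 256.
 */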
static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
{
	return 0;
}

static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a, b)		(0)
#define blk_rq_map_integrity_sg(a, b, c)	(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)
#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
#define queue_max_integrity_segments(a)		(0)
#define blk_integrity_merge_rq(a, b, c)		(0)
#define blk_integrity_merge_bio(a, b, c)	(0)
#define blk_integrity_is_initialized(a)		(0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
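
/*
 * Illustrative sketch (not part of this header): a minimal operations table
 * for a simple disk driver; my_open, my_release and my_getgeo are
 * hypothetical:
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.getgeo		= my_getgeo,
 *	};
 *
 * The table is hooked up via disk->fops = &my_fops before add_disk().
 */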
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */