#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_SENSE,		/* sense request */
	REQ_TYPE_PM_SUSPEND,	/* suspend request */
	REQ_TYPE_PM_RESUME,	/* resume request */
	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
	REQ_TYPE_SPECIAL,	/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the three elevator_private pointers.
	 */
	union {
		void *elevator_private[3];
		struct {
			unsigned int seq;
			struct list_head list;
			rq_end_io_fn *saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
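/*
 * Usage sketch (illustrative only): __sector and __data_len above are
 * internal, so drivers read them through the accessors declared further
 * below in this header, e.g. for a struct request *rq they already hold:
 *
 *	sector_t pos = blk_rq_pos(rq);			current sector
 *	unsigned int bytes = blk_rq_bytes(rq);		bytes left in the request
 *	unsigned int sectors = blk_rq_sectors(rq);	512-byte sectors left
 */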
/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests.  Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char discard_zeroes_data;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	merge_bvec_fn *merge_bvec_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work delay_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	struct queue_limits limits;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int flush_flags;
	unsigned int flush_not_queueable:1;
	unsigned int flush_queue_delayed:1;
	unsigned int flush_pending_idx:1;
	unsigned int flush_running_idx:1;
	unsigned long flush_pending_since;
	struct list_head flush_queue[2];
	struct list_head flush_data_in_flight;
	struct request flush_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	int bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}
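/*
 * Usage sketch (illustrative only): the _unlocked variants skip the
 * queue_is_locked() check, while queue_flag_set()/queue_flag_clear() below
 * WARN if the queue lock is not held.  A caller manipulating flags on a
 * live queue would therefore typically do something like the following,
 * assuming it already has a struct request_queue *q and an
 * unsigned long flags:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */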
static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK	0	/* serve it */
#define BLKPREP_KILL	1	/* fatal error, kill */
#define BLKPREP_DEFER	2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
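/*
 * Usage sketch for the rq_for_each_segment() iterator defined above
 * (illustrative only; "rq" is assumed to be a struct request the caller
 * already holds):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		bytes += bvec->bv_len;
 */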
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_err_bytes()	: bytes left till the next error boundary
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);
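/*
 * Minimal sketch of how the issue and completion interfaces above fit
 * together (illustrative only; "my_request_fn" and the hardware hand-off
 * are hypothetical).  The request_fn is invoked with ->queue_lock held,
 * which is why the __blk_end_request_*() variants are used here:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (rq->cmd_type != REQ_TYPE_FS) {
 *				__blk_end_request_all(rq, -EIO);
 *				continue;
 *			}
 *			hand rq to the hardware, then complete it, e.g.
 *			__blk_end_request_all(rq, 0);
 *		}
 *	}
 */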
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself.  For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	unsigned long magic;		/* detect uninitialized use-cases */
	struct list_head list;		/* requests */
	struct list_head cb_list;	/* md requires an unplug callback */
	unsigned int should_sort;	/* list to be sorted before flushing? */
};
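/*
 * Plugging usage sketch (illustrative only; "bios" and "nr" are assumed to
 * be a caller-provided array of struct bio pointers and its length):
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		generic_make_request(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * Requests built from the bios sit on the per-task plug list between
 * blk_start_plug() and blk_finish_plug(), giving the block layer a chance
 * to merge them before they reach the device's request_queue.
 */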
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb {
	struct list_head list;
	void (*callback)(struct blk_plug_cb *);
};

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}
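/*
 * Worked example for queue_limit_alignment_offset() above (numbers are
 * illustrative): on a drive reporting a 4096 byte physical block size and
 * an alignment_offset of 3584 bytes (the classic "logical sector 63"
 * compatibility layout), and assuming io_min does not exceed 4096,
 * granularity is 4096, so:
 *
 *	sector 63: alignment = 32256 & 4095 = 3584,
 *		   result = (4096 + 3584 - 3584) & 4095 = 0	(aligned)
 *	sector 64: alignment = 32768 & 4095 = 0,
 *		   result = (4096 + 3584 - 0) & 4095 = 3584	(3584 bytes
 *		   short of the next physically aligned boundary)
 */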
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void *prot_buf;
	void *data_buf;
	sector_t sector;
	unsigned int data_size;
	unsigned short sector_size;
	const char *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn *generate_fn;
	integrity_vrfy_fn *verify_fn;
	integrity_set_tag_fn *set_tag_fn;
	integrity_get_tag_fn *get_tag_fn;

	unsigned short flags;
	unsigned short tuple_size;
	unsigned short sector_size;
	unsigned short tag_size;
	const char *name;

	struct kobject kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a, b)		(0)
#define blk_rq_map_integrity_sg(a, b, c)	(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)
#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
#define queue_max_integrity_segments(a)		(0)
#define blk_integrity_merge_rq(a, b, c)		(0)
#define blk_integrity_merge_bio(a, b, c)	(0)
#define blk_integrity_is_initialized(a)		(0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}
struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}


static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif