#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	REQ_LB_OP_EJECT		= 0x40,	/* eject request */
	REQ_LB_OP_FLUSH		= 0x41,	/* flush request */
	REQ_LB_OP_DISCARD	= 0x42,	/* discard sectors */
};
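/*
 * Illustrative sketch (not part of this header): the sync/async indexing
 * convention above. Allocation paths derive the request_list index from a
 * request's sync hint and use it for the matching count and waitqueue;
 * "rl" is a hypothetical struct request_list pointer.
 *
 *	int is_sync = rq_is_sync(rq) ? BLK_RW_SYNC : BLK_RW_ASYNC;
 *	rl->count[is_sync]++;
 *	if (waitqueue_active(&rl->wait[is_sync]))
 *		wake_up(&rl->wait[is_sync]);
 */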
/*
 * request type modifier bits. The first two bits must match the BIO_RW*
 * bits; this is important.
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read; set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW			(1 << __REQ_RW)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD		(1 << __REQ_DISCARD)
#define REQ_SORTED		(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
#define REQ_FUA			(1 << __REQ_FUA)
#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
#define REQ_STARTED		(1 << __REQ_STARTED)
#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
#define REQ_QUEUED		(1 << __REQ_QUEUED)
#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
#define REQ_FAILED		(1 << __REQ_FAILED)
#define REQ_QUIET		(1 << __REQ_QUIET)
#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
#define REQ_RW_META		(1 << __REQ_RW_META)
#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
#define REQ_IO_STAT		(1 << __REQ_IO_STAT)

#define BLK_MAX_CDB	16
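/*
 * Illustrative sketch (not part of this header): cmd_flags is an ordinary
 * bitmask built from the REQ_* values above, so drivers test and combine
 * the bits directly. "my_issue_fua_write" is a hypothetical helper.
 *
 *	rq->cmd_flags |= REQ_QUIET;	(suppress error reporting)
 *	if (rq->cmd_flags & REQ_FUA)
 *		my_issue_fua_write(rq);
 */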
/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;		/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int data_len;
	unsigned int extra_len;		/* length of alignment and padding */
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
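/*
 * Illustrative sketch (not from this header): a trivial ramdisk-style
 * request_fn servicing the current segment; "dev" and its "data" buffer
 * are hypothetical. Only the plain fields are used here - the hard_*
 * completion state is block layer internal.
 *
 *	if (rq_data_dir(rq) == READ)
 *		memcpy(rq->buffer, dev->data + rq->sector * 512,
 *		       rq->current_nr_sectors * 512);
 *	else
 *		memcpy(dev->data + rq->sector * 512, rq->buffer,
 *		       rq->current_nr_sectors * 512);
 */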
/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
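/*
 * Illustrative sketch (not from this header): a timeout handler matching
 * rq_timed_out_fn, granting a busy device one more timer interval before
 * the error path runs. "my_device_busy" is a hypothetical helper.
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		if (my_device_busy(rq->q))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 */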
enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;

	/*
	 * the queue request freelist, with separate counts and waitqueues
	 * for sync and async requests
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	prepare_discard_fn *prepare_discard_fn;
	merge_bvec_fn *merge_bvec_fn;
	prepare_flush_fn *prepare_flush_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list unplug_timer;
	int unplug_thresh;		/* After this many requests */
	unsigned long unplug_delay;	/* After this many jiffies */
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long bounce_pfn;
	gfp_t bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int max_sectors;
	unsigned int max_hw_sectors;
	unsigned short max_phys_segments;
	unsigned short max_hw_segments;
	unsigned short hardsect_size;
	unsigned int max_segment_size;

	unsigned long seg_boundary_mask;
	void *dma_drain_buffer;
	unsigned int dma_drain_size;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight;

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int ordered, next_ordered, ordseq;
	int orderr, ordcolor;
	struct request pre_flush_rq, bar_rq, post_flush_rq;
	struct request *orig_bar_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
	struct blk_cmd_filter cmd_filter;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_CLUSTER) |	\
				 (1 << QUEUE_FLAG_STACKABLE))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}
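/*
 * Illustrative sketch (assumption): these helpers use the non-atomic
 * __set_bit/__clear_bit, so callers of the locked variants must hold
 * ->queue_lock, e.g.
 *
 *	spin_lock_irq(q->queue_lock);
 *	if (queue_flag_test_and_clear(QUEUE_FLAG_STOPPED, q))
 *		__blk_run_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 */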
static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
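/*
 * Illustrative sketch (not from this header): the usual dispatch on
 * cmd_type in a driver's request_fn; the my_* helpers are hypothetical.
 *
 *	if (blk_fs_request(rq))
 *		my_transfer_bios(rq);
 *	else if (blk_pc_request(rq))
 *		my_handle_scsi_cdb(rq);
 *	else
 *		end_request(rq, 0);
 */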
#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
				 blk_failfast_transport(rq) ||	\
				 blk_failfast_driver(rq))
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)

#define blk_account_rq(rq)	\
	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}
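/*
 * Illustrative sketch (assumption): the sync hint picks which "full" flag
 * and which request_list waitqueue a blocked allocator uses.
 *
 *	int is_sync = rw_is_sync(bio->bi_rw);
 *	if (blk_queue_full(q, is_sync))
 *		wait_event(q->rq.wait[is_sync],
 *			   !blk_queue_full(q, is_sync));
 */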
/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * the driver have already started it.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request(rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt - 1)
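/*
 * Illustrative sketch (not from this header): walking every segment of a
 * request with rq_for_each_segment(). bio_for_each_segment() in this
 * kernel yields a struct bio_vec pointer; the pages are assumed to be
 * lowmem so page_address() is valid.
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		memset(page_address(bvec->bv_page) + bvec->bv_offset,
 *		       0, bvec->bv_len);
 */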
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
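/*
 * Illustrative sketch (assumption): issuing a driver-private request
 * synchronously with the helpers declared above.
 *
 *	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
 *	rq->cmd_type = REQ_TYPE_SPECIAL;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */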
extern void blkdev_dequeue_request(struct request *req);

/*
 * blk_end_request() and friends.
 * __blk_end_request() and end_request() must be called with
 * the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern int blk_end_request(struct request *rq, int error,
			   unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
			     unsigned int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error,
				unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
				    unsigned int nr_bytes,
				    int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);

/*
 * blk_end_request() takes bytes instead of sectors as a complete size.
 * blk_rq_bytes() returns bytes left to complete in the entire request.
 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 */
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
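/*
 * Illustrative sketch (assumption): completing I/O from a driver's
 * interrupt handler. blk_end_request() returns 0 once the request is
 * fully completed and nonzero while bytes are still pending; the
 * "my_start_next" helper is hypothetical.
 *
 *	if (!blk_end_request(rq, error, nr_bytes))
 *		my_start_next(q);
 */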
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
						 spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
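/*
 * Illustrative sketch (not from this header): the usual tagged-queueing
 * flow. blk_queue_start_tag() returns nonzero when no tag is free; on
 * completion the tag is handed back with blk_queue_end_tag().
 * "my_hw_submit" is hypothetical.
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;			(out of tags, retry later)
 *	my_hw_submit(rq, rq->tag);
 *	...on completion...
 *	blk_queue_end_tag(q, rq);
 */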
extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *,
				sector_t sector, sector_t nr_sects, gfp_t);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}

/*
 * command filter functions
 */
extern int blk_verify_command(struct blk_cmd_filter *filter,
			      unsigned char *cmd, fmode_t has_write_perm);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);

#define MAX_PHYS_SEGMENTS	128
#define MAX_HW_SEGMENTS		128
#define SAFE_MAX_SECTORS	255
#define BLK_DEF_MAX_SECTORS	1024

#define MAX_SEGMENT_SIZE	65536

#define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */
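/*
 * Illustrative sketch (assumption): drivers only map integrity metadata
 * when a request actually carries some; with CONFIG_BLK_DEV_INTEGRITY off
 * the stubs above compile the same code away. "prot_sgl" is hypothetical.
 *
 *	if (blk_integrity_rq(rq))
 *		nsegs = blk_rq_map_integrity_sg(rq, prot_sgl);
 */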
struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */