#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 *
 */
enum {
	REQ_LB_OP_EJECT		= 0x40,	/* eject request */
	REQ_LB_OP_FLUSH		= 0x41,	/* flush request */
	REQ_LB_OP_DISCARD	= 0x42,	/* discard sectors */
};

/*
 * request type modifier bits. first two bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW			(1 << __REQ_RW)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD		(1 << __REQ_DISCARD)
#define REQ_SORTED		(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
#define REQ_FUA			(1 << __REQ_FUA)
#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
#define REQ_STARTED		(1 << __REQ_STARTED)
#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
#define REQ_QUEUED		(1 << __REQ_QUEUED)
#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
#define REQ_FAILED		(1 << __REQ_FAILED)
#define REQ_QUIET		(1 << __REQ_QUIET)
#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
#define REQ_RW_META		(1 << __REQ_RW_META)
#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int data_len;
	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
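
/*
 * Illustrative sketch (not part of this header): a simple driver's
 * request_fn might walk the fields above one segment at a time. The
 * transfer helper and function names are hypothetical; real drivers
 * also handle errors and partial completion.
 *
 *	static void example_request(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				end_request(rq, 0);
 *				continue;
 *			}
 *			example_transfer(rq->rq_disk->private_data, rq->sector,
 *					 rq->current_nr_sectors, rq->buffer,
 *					 rq_data_dir(rq));
 *			end_request(rq, 1);
 *		}
 *	}
 */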

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	prepare_discard_fn *prepare_discard_fn;
	merge_bvec_fn *merge_bvec_fn;
	prepare_flush_fn *prepare_flush_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list unplug_timer;
	int unplug_thresh;		/* After this many requests */
	unsigned long unplug_delay;	/* After this many jiffies */
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long bounce_pfn;
	gfp_t bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int max_sectors;
	unsigned int max_hw_sectors;
	unsigned short max_phys_segments;
	unsigned short max_hw_segments;
	unsigned short hardsect_size;
	unsigned int max_segment_size;

	unsigned long seg_boundary_mask;
	void *dma_drain_buffer;
	unsigned int dma_drain_size;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight;

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int ordered, next_ordered, ordseq;
	int orderr, ordcolor;
	struct request pre_flush_rq, bar_rq, post_flush_rq;
	struct request *orig_bar_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
	struct blk_cmd_filter cmd_filter;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
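
/*
 * Illustrative sketch (not part of this header): the non-"_unlocked"
 * helpers above expect ->queue_lock to be held, so a driver that wants
 * to mark its queue non-rotational after probing would do something
 * like the following (the function name is hypothetical):
 *
 *	static void example_mark_ssd(struct request_queue *q)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		queue_flag_set(QUEUE_FLAG_NONROT, q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 *	}
 */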

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
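
/*
 * Illustrative sketch (not part of this header): a driver advertises which
 * of the ordering methods above its hardware supports via blk_queue_ordered()
 * (declared further down). For a disk with a volatile write cache that only
 * understands a cache-flush command, a hypothetical init path might do:
 *
 *	static void example_prepare_flush(struct request_queue *q,
 *					  struct request *rq)
 *	{
 *		... fill in the driver's cache-flush command in rq ...
 *	}
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, example_prepare_flush);
 *
 * A write-through device without a cache would pass QUEUE_ORDERED_DRAIN and
 * a NULL prepare_flush_fn instead.
 */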

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
				 blk_failfast_transport(rq) ||	\
				 blk_failfast_driver(rq))
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync, if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		queue_flag_set(QUEUE_FLAG_READFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		queue_flag_clear(QUEUE_FLAG_READFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
}
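
/*
 * Illustrative sketch (not part of this header): a prep_rq_fn typically
 * dispatches on the classification macros above before building a device
 * command. The helper names are hypothetical and are assumed to return
 * the BLKPREP_* values defined below:
 *
 *	static int example_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (blk_fs_request(rq))
 *			return example_build_rw_cmd(rq, rq_data_dir(rq));
 *		if (blk_pc_request(rq))
 *			return example_build_pc_cmd(rq);
 *		return BLKPREP_KILL;
 *	}
 */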

/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request((rq))))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
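
/*
 * Illustrative sketch (not part of this header): rq_for_each_segment()
 * visits every bio_vec of every bio in a request, which is handy for
 * drivers that program a transfer segment by segment. The byte-counting
 * helper below is hypothetical:
 *
 *	static unsigned int example_rq_byte_count(struct request *rq)
 *	{
 *		struct req_iterator iter;
 *		struct bio_vec *bvec;
 *		unsigned int bytes = 0;
 *
 *		rq_for_each_segment(bvec, rq, iter)
 *			bytes += bvec->bv_len;
 *
 *		return bytes;
 *	}
 */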

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

extern void blkdev_dequeue_request(struct request *req);

/*
 * blk_end_request() and friends.
 * __blk_end_request() and end_request() must be called with
 * the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern int blk_end_request(struct request *rq, int error,
			   unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
			     unsigned int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error,
				unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
				    unsigned int nr_bytes,
				    int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);

/*
 * blk_end_request() takes bytes instead of sectors as a complete size.
 * blk_rq_bytes() returns bytes left to complete in the entire request.
 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 */
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
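
/*
 * Illustrative sketch (not part of this header): a driver's completion
 * path usually calls blk_end_request() with the number of bytes the
 * hardware actually finished. The helper below is hypothetical and is
 * assumed to run without the queue lock held:
 *
 *	static void example_complete_rq(struct request *rq, int error)
 *	{
 *		unsigned int done = blk_rq_cur_bytes(rq);
 *
 *		if (blk_end_request(rq, error, done))
 *			example_submit_next_segment(rq);
 *	}
 *
 * blk_end_request() returns nonzero while the request still has bytes
 * left to complete, and zero once the whole request has been finished
 * and handed back to the block layer.
 */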

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
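
/*
 * Illustrative sketch (not part of this header): a typical driver probe
 * path allocates a queue and then declares its limits with the helpers
 * above. All names except the blk_* calls are hypothetical:
 *
 *	q = blk_init_queue(example_request, &example_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 *	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *	q->queuedata = example_dev;
 */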

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
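
/*
 * Illustrative sketch (not part of this header): a driver that supports
 * tagged command queueing sets up the tag map once, tags each request as
 * it is started, and releases the tag on completion. Everything except
 * the blk_queue_* calls is hypothetical, and the queue lock is assumed
 * to be held where the block layer requires it:
 *
 *	blk_queue_init_tags(q, 32, NULL);		(probe time)
 *
 *	if (blk_queue_start_tag(q, rq))			(in request_fn)
 *		return;					(out of tags, try later)
 *	example_issue_to_hw(rq, rq->tag);
 *
 *	blk_queue_end_tag(q, rq);			(in the completion path)
 */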

extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *,
				sector_t sector, sector_t nr_sects, gfp_t);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}

/*
 * command filter functions
 */
extern int blk_verify_command(struct blk_cmd_filter *filter,
			      unsigned char *cmd, fmode_t has_write_perm);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0);

#endif /* CONFIG_BLK_DEV_INTEGRITY */
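
/*
 * Illustrative sketch (not part of this header): a disk driver whose
 * hardware carries per-sector protection information describes it with a
 * struct blk_integrity and registers it against its gendisk. The callback
 * names and tuple layout below are hypothetical:
 *
 *	static struct blk_integrity example_integrity = {
 *		.name		= "EXAMPLE-DIF",
 *		.generate_fn	= example_generate_prot,
 *		.verify_fn	= example_verify_prot,
 *		.tuple_size	= 8,
 *		.tag_size	= 0,
 *	};
 *
 *	blk_integrity_register(disk, &example_integrity);
 */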

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif