#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/bsg.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 *
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH	= 0x41,		/* flush device */
};
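
/*
 * Illustrative sketch, not part of the original header: a driver that
 * accepts REQ_TYPE_LINUX_BLOCK messages would dispatch on rq->cmd[0] in
 * its request handler; the mydrv_* helpers below are hypothetical.
 *
 *	if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK) {
 *		switch (rq->cmd[0]) {
 *		case REQ_LB_OP_EJECT:
 *			mydrv_eject(rq);
 *			break;
 *		case REQ_LB_OP_FLUSH:
 *			mydrv_flush_cache(rq);
 *			break;
 *		}
 *	}
 */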

/*
 * request type modified bits. first three bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;		/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	elevator_t *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	merge_bvec_fn *merge_bvec_fn;
	prepare_flush_fn *prepare_flush_fn;
	softirq_done_fn *softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list unplug_timer;
	int unplug_thresh;		/* After this many requests */
	unsigned long unplug_delay;	/* After this many jiffies */
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long bounce_pfn;
	gfp_t bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int max_sectors;
	unsigned int max_hw_sectors;
	unsigned short max_phys_segments;
	unsigned short max_hw_segments;
	unsigned short hardsect_size;
	unsigned int max_segment_size;

	unsigned long seg_boundary_mask;
	void *dma_drain_buffer;
	unsigned int dma_drain_size;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int ordered, next_ordered, ordseq;
	int orderr, ordcolor;
	struct request pre_flush_rq, bar_rq, post_flush_rq;
	struct request *orig_bar_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
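
/*
 * Illustrative sketch, not part of the original header: a driver with a
 * writeback cache but no tagged ordering might advertise barrier support
 * from its init path roughly like this; my_prepare_flush and the
 * MYDRV_CACHE_FLUSH opcode are hypothetical.
 *
 *	static void my_prepare_flush(struct request_queue *q, struct request *rq)
 *	{
 *		rq->cmd_type = REQ_TYPE_FLUSH;
 *		rq->cmd[0] = MYDRV_CACHE_FLUSH;
 *	}
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
 *
 * blk_queue_ordered() is declared with the other queue accessors below.
 */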

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
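
/*
 * Illustrative sketch, not part of the original header: a prep_rq_fn
 * installed with blk_queue_prep_rq() typically builds the command for a
 * request and picks one of the BLKPREP_* results; mydrv_build_cmd is a
 * hypothetical helper.
 *
 *	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!blk_fs_request(rq))
 *			return BLKPREP_KILL;	// fatal: unsupported type
 *		if (mydrv_build_cmd(rq) < 0)
 *			return BLKPREP_DEFER;	// no resources, leave on queue
 *		return BLKPREP_OK;		// ready for ->request_fn
 *	}
 *
 *	blk_queue_prep_rq(q, mydrv_prep_rq);
 */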

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
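
/*
 * Illustrative sketch, not part of the original header: walking every
 * segment of a request, e.g. in a driver that transfers by memcpy.  The
 * kmap is only needed for highmem pages; the transfer itself is left out.
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *	char *buf;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		buf = kmap_atomic(bvec->bv_page, KM_USER0);
 *		// transfer bvec->bv_len bytes at buf + bvec->bv_offset
 *		kunmap_atomic(buf, KM_USER0);
 *	}
 *
 * rq_iter_last(rq, iter) can be used inside the loop to detect the final
 * segment.
 */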

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_plug_device(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
extern void blk_unplug(struct request_queue *q);
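
/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * for issuing a one-off packet command from kernel code is to allocate a
 * request, fill ->cmd[], optionally map a data buffer, execute it
 * synchronously and then put the request.  q, bd_disk, cdb, buf, len and
 * err are assumed to exist in the caller.
 *
 *	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	memcpy(rq->cmd, cdb, sizeof(cdb));
 *	rq->cmd_len = sizeof(cdb);
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	if (buf && blk_rq_map_kern(q, rq, buf, len, __GFP_WAIT))
 *		goto out_put;
 *	blk_execute_rq(q, bd_disk, rq, 0);	// 0 == insert at the tail
 *	err = rq->errors ? -EIO : 0;
 *   out_put:
 *	blk_put_request(rq);
 */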

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * blk_end_request() and friends.
 * __blk_end_request() and end_request() must be called with
 * the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern int blk_end_request(struct request *rq, int error,
				unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
				unsigned int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error,
				unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
				unsigned int nr_bytes,
				int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);

/*
 * blk_end_request() takes bytes instead of sectors as a complete size.
 * blk_rq_bytes() returns bytes left to complete in the entire request.
 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 */
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
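
/*
 * Illustrative sketch, not part of the original header: a driver's
 * completion path, called without the queue lock held, might look like
 * this (mydrv_bytes_done() is a hypothetical helper returning how many
 * bytes the hardware finished):
 *
 *	static void mydrv_finish(struct request *rq, int error)
 *	{
 *		if (blk_end_request(rq, error, mydrv_bytes_done(rq)))
 *			return;		// partial completion, more to do
 *		// the request and all of its bios are now fully completed
 *	}
 *
 * blk_end_request() returns 0 once the request has been completed in full.
 */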

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
			       unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
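
/*
 * Illustrative sketch, not part of the original header: a request-based
 * driver typically allocates its queue and applies its limits with the
 * accessors above during probe; mydrv_request, mydrv_lock and the prep
 * function are hypothetical.
 *
 *	q = blk_init_queue(mydrv_request, &mydrv_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 *	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_prep_rq(q, mydrv_prep_rq);	// see the prep sketch above
 *	disk->queue = q;
 */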

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
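
/*
 * Illustrative sketch, not part of the original header: a driver using
 * generic tag queueing sets up the tag map once, tags each request before
 * issuing it and releases the tag on completion; depth and mydrv_issue
 * are hypothetical.
 *
 *	blk_queue_init_tags(q, depth, NULL);	// in the probe path
 *
 *	// in the request_fn, with the queue lock held:
 *	if (blk_queue_start_tag(q, rq))
 *		return;			// no free tag, try again later
 *	mydrv_issue(rq);		// rq->tag now identifies the command
 *
 *	// on completion, with the queue lock held:
 *	blk_queue_end_tag(q, rq);
 */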

extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")


#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}


#endif /* CONFIG_BLOCK */

#endif