#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/bsg.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};
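
/*
 * Illustrative sketch only (not part of this header): a driver's request
 * handler typically dispatches on the command type via the helpers defined
 * further down (blk_fs_request() etc.); my_do_rw() and my_do_packet_cmd()
 * are hypothetical driver functions.
 *
 *	if (blk_fs_request(rq))
 *		my_do_rw(rq);
 *	else if (blk_pc_request(rq))
 *		my_do_packet_cmd(rq->cmd, rq->cmd_len);
 */
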
/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH	= 0x41,		/* flush device */
};
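
/*
 * Illustrative sketch only: a driver receiving a REQ_TYPE_LINUX_BLOCK
 * request might dispatch on the opcode like this (my_eject() and
 * my_flush_cache() are hypothetical driver helpers):
 *
 *	switch (rq->cmd[0]) {
 *	case REQ_LB_OP_EJECT:
 *		my_eject(rq);
 *		break;
 *	case REQ_LB_OP_FLUSH:
 *		my_flush_cache(rq);
 *		break;
 *	}
 */
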
/*
 * request type modifier bits. the first three bits match the BIO_RW* bits,
 * which is important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
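
/*
 * Illustrative sketch only: drivers test these bits on rq->cmd_flags with
 * the REQ_* masks above; e.g. checking whether a request may not be
 * reordered, and whether it must reach the media before completion:
 *
 *	int no_reorder = (rq->cmd_flags &
 *			  (REQ_SOFTBARRIER | REQ_HARDBARRIER)) != 0;
 *	int to_media = (rq->cmd_flags & REQ_FUA) != 0;
 */
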
#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int raw_data_len;
	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};
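
/*
 * Illustrative sketch only (my_hw_xfer() is a hypothetical driver helper;
 * error handling elided): the sector/current_nr_sectors/buffer fields above
 * support the classic "one segment at a time" request loop of a simple
 * driver, with elv_next_request() coming from <linux/elevator.h>, included
 * below:
 *
 *	while ((rq = elv_next_request(q)) != NULL) {
 *		my_hw_xfer(rq->sector, rq->current_nr_sectors,
 *			   rq->buffer, rq_data_dir(rq));
 *		end_request(rq, 1);
 *	}
 */
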
/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void	*data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	elevator_t *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	merge_bvec_fn *merge_bvec_fn;
	prepare_flush_fn *prepare_flush_fn;
	softirq_done_fn *softirq_done_fn;
	dma_drain_needed_fn *dma_drain_needed;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list unplug_timer;
	int unplug_thresh;		/* After this many requests */
	unsigned long unplug_delay;	/* After this many jiffies */
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long bounce_pfn;
	gfp_t bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int max_sectors;
	unsigned int max_hw_sectors;
	unsigned short max_phys_segments;
	unsigned short max_hw_segments;
	unsigned short hardsect_size;
	unsigned int max_segment_size;

	unsigned long seg_boundary_mask;
	void *dma_drain_buffer;
	unsigned int dma_drain_size;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int ordered, next_ordered, ordseq;
	int orderr, ordcolor;
	struct request pre_flush_rq, bar_rq, post_flush_rq;
	struct request *orig_bar_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};
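
/*
 * Illustrative sketch only: a typical driver allocates and parameterizes
 * its queue roughly like this during probe (my_request_fn, my_lock and
 * my_dev are hypothetical; the blk_queue_* setters are declared below):
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_sectors(q, 128);
 *	blk_queue_hardsect_size(q, 512);
 *	q->queuedata = my_dev;
 */
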
#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
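
/*
 * Illustrative sketch only: a driver for a device with a write-back cache
 * that understands a cache-flush command would typically register
 * (my_prepare_flush is a hypothetical prepare_flush_fn; blk_queue_ordered()
 * is declared further down):
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
 */
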
#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
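
/*
 * Illustrative sketch only (my_setup_dma_to_device() and
 * my_setup_dma_from_device() are hypothetical driver helpers):
 * rq_data_dir() evaluates to READ (0) or WRITE (1), so a driver
 * programming a DMA engine might do:
 *
 *	if (rq_data_dir(rq) == WRITE)
 *		my_setup_dma_to_device(rq);
 *	else
 *		my_setup_dma_from_device(rq);
 */
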
static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
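
/*
 * Illustrative sketch only: a driver whose hardware cannot DMA to highmem
 * pages would select bouncing at queue setup time with
 * blk_queue_bounce_limit(), declared further down in this header:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */
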
/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
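
/*
 * Illustrative sketch only (my_xfer_buf() is a hypothetical driver helper;
 * lowmem pages assumed for brevity): walking every bio_vec segment of a
 * request with the iterator above:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_xfer_buf(page_address(bvec->bv_page) + bvec->bv_offset,
 *			    bvec->bv_len);
 */
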
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_plug_device(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
			 struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
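
/*
 * Illustrative sketch only: synchronously issuing a packet command, a
 * sketch of the pattern used by helpers such as the SCSI midlayer's
 * scsi_execute() (opcode 0x00 is TEST UNIT READY; error handling elided):
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = 0x00;
 *	rq->cmd_len = 6;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */
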
/*
 * blk_end_request() and friends.
 * __blk_end_request() and end_request() must be called with
 * the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern int blk_end_request(struct request *rq, int error,
			   unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
			     unsigned int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error,
				unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
				    unsigned int nr_bytes,
				    int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
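
/*
 * Illustrative sketch only: a driver that has finished nr_bytes of rq
 * (error == 0 for success, negative errno otherwise) completes it with:
 *
 *	if (blk_end_request(rq, error, nr_bytes))
 *		return;
 *
 * A nonzero return means the request was only partially completed and the
 * driver will be asked to transfer the rest; zero means rq is finished
 * (and released, so it must not be touched afterwards).
 */
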
/*
 * blk_end_request() takes bytes instead of sectors as a complete size.
 * blk_rq_bytes() returns bytes left to complete in the entire request.
 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 */
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
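
/*
 * Illustrative sketch only: tagged command queueing from a request_fn
 * (which runs with the queue lock held):
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;
 *
 * On success rq has been dequeued and rq->tag holds the tag to hand to the
 * hardware; when the command finishes, the driver returns the tag with
 * blk_queue_end_tag(q, rq) before ending the request.
 */
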
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS	128
#define MAX_HW_SEGMENTS		128
#define SAFE_MAX_SECTORS	255
#define BLK_DEF_MAX_SECTORS	1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
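
/*
 * Illustrative sketch only: a module that owns (hypothetical) block major
 * 240 enables autoloading on open of its device nodes with:
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(240);
 *
 * which expands to MODULE_ALIAS("block-major-240-*").
 */
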

#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}


#endif /* CONFIG_BLOCK */

#endif