#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
};

/*
 * request type modifier bits. first two bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NR_BITS,		/* stops here */
};
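/*
 * Illustrative sketch (not part of the original header): a driver can key
 * off rq->cmd_type to decide how to interpret a request; the
 * blk_fs_request() and blk_pc_request() helpers further down wrap exactly
 * this test. "handle_filesystem_io" and "handle_scsi_cdb" are hypothetical.
 *
 *	if (rq->cmd_type == REQ_TYPE_FS)
 *		handle_filesystem_io(rq);
 *	else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 *		handle_scsi_cdb(rq->cmd, rq->cmd_len);
 */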
#define REQ_RW		(1 << __REQ_RW)
#define REQ_DISCARD	(1 << __REQ_DISCARD)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int data_len;
	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};
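/*
 * Illustrative sketch (not part of the original header): a minimal
 * request_fn style loop reading only the driver-visible fields of
 * struct request; the hard_* fields stay untouched, as the comment in
 * the structure demands. "my_do_sectors" is hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			int write = rq->cmd_flags & REQ_RW;
 *
 *			my_do_sectors(rq->sector, rq->current_nr_sectors,
 *				      rq->buffer, write);
 *			end_request(rq, 1);
 *		}
 *	}
 */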
#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	prepare_discard_fn	*prepare_discard_fn;
	merge_bvec_fn		*merge_bvec_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;
	dma_drain_needed_fn	*dma_drain_needed;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
	struct blk_cmd_filter cmd_filter;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
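/*
 * Illustrative sketch (not part of the original header): the locked
 * variants above must be called with ->queue_lock held (the WARN_ON_ONCE
 * checks this); the _unlocked variants are for paths where the queue is
 * not yet visible to others or atomicity doesn't matter.
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irq(q->queue_lock);
 */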
enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
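/*
 * Illustrative sketch (not part of the original header): a driver for a
 * device with a write-back cache that can be flushed would typically
 * register for drained ordering with pre/post flushes at init time.
 * "my_prepare_flush" is a hypothetical prepare_flush_fn.
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
 */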
#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
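/*
 * Illustrative sketch (not part of the original header): io schedulers and
 * drivers use these accessors rather than poking at cmd_flags directly,
 * e.g. to treat async writes differently from reads and sync writes.
 * "defer_async_write" is hypothetical.
 *
 *	if (rq_data_dir(rq) == WRITE && !rq_is_sync(rq))
 *		defer_async_write(rq);
 */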
static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		queue_flag_set(QUEUE_FLAG_READFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		queue_flag_clear(QUEUE_FLAG_READFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request((rq))))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
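/*
 * Illustrative sketch (not part of the original header): a prep_rq_fn
 * decides per request whether it can be started. "my_resources_available"
 * and "my_build_command" are hypothetical driver helpers.
 *
 *	static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_resources_available(q))
 *			return BLKPREP_DEFER;	retried later
 *		if (my_build_command(rq) < 0)
 *			return BLKPREP_KILL;	fail the request
 *		rq->cmd_flags |= REQ_DONTPREP;	don't prep again
 *		return BLKPREP_OK;
 *	}
 */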
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
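/*
 * Illustrative sketch (not part of the original header): iterating every
 * bio_vec segment of a request, e.g. for a PIO style transfer.
 * "my_copy_segment" is hypothetical.
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_copy_segment(page_address(bvec->bv_page) + bvec->bv_offset,
 *				bvec->bv_len);
 */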
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
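/*
 * Illustrative sketch (not part of the original header): synchronously
 * issuing a driver-private command through the queue. "my_cmd" is a
 * hypothetical payload.
 *
 *	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
 *
 *	rq->cmd_type = REQ_TYPE_SPECIAL;
 *	rq->special = my_cmd;
 *	err = blk_execute_rq(q, disk, rq, 0);	waits for completion
 *	blk_put_request(rq);
 */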
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * blk_end_request() and friends.
 * __blk_end_request() and end_request() must be called with
 * the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern int blk_end_request(struct request *rq, int error,
				unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
				unsigned int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error,
				unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
				unsigned int nr_bytes,
				int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);

/*
 * blk_end_request() takes bytes instead of sectors as a complete size.
 * blk_rq_bytes() returns bytes left to complete in the entire request.
 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 */
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
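/*
 * Illustrative sketch (not part of the original header): completing a
 * request piecemeal from a completion handler. blk_end_request() returns
 * nonzero while bytes remain and 0 once the whole request is finished
 * (use __blk_end_request() if the queue lock is already held).
 *
 *	if (!blk_end_request(rq, error, nr_bytes_done))
 *		;	request fully completed, start the next one
 */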
static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
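/*
 * Illustrative sketch (not part of the original header): typical queue
 * setup in a driver's probe path. "my_request_fn" and "my_lock" are
 * hypothetical.
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_sectors(q, 128);
 *	blk_queue_max_phys_segments(q, 32);
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */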
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *, sector_t sector,
				unsigned nr_sects);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, unsigned nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks);
}

/*
 * command filter functions
 */
extern int blk_verify_command(struct blk_cmd_filter *filter,
			      unsigned char *cmd, int has_write_perm);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536
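/*
 * Illustrative sketch (not part of the original header): tagged command
 * queueing inside a request_fn. blk_queue_start_tag() assigns rq->tag and
 * dequeues the request; blk_queue_end_tag() releases the tag on
 * completion. Both must be called with ->queue_lock held. "my_hw_submit"
 * is hypothetical.
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;			out of tags, try again later
 *	my_hw_submit(rq->tag, rq);
 */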
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};
typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct block_device *, struct block_device *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
	if (bi)
		return bi->tuple_size;

	return 0;
}

static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi)
		return bi->tag_size;

	return 0;
}

static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi == NULL)
		return 0;

	if (rw == READ && bi->verify_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_READ))
		return 1;

	if (rw == WRITE && bi->generate_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_WRITE))
		return 1;

	return 0;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}
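/*
 * Illustrative sketch (not part of the original header): a controller
 * driver checks whether a request carries integrity metadata and, if so,
 * maps it into a separate scatterlist. "prot_sg" is a hypothetical,
 * suitably sized driver buffer.
 *
 *	if (blk_integrity_rq(rq)) {
 *		int nents = blk_rq_count_integrity_sg(rq);
 *		...make sure prot_sg has nents entries...
 *		blk_rq_map_integrity_sg(rq, prot_sg);
 *	}
 */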
#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define bdev_get_tag_size(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)
#define blk_integrity_register(a, b)		(0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif