/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)
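
/*
 * Illustrative sketch (editor's example, not an API defined in this file):
 * converting between byte counts and 512-byte sectors with the constants
 * above.
 *
 *	sector_t nr_sects = nr_bytes >> SECTOR_SHIFT;      // bytes -> sectors
 *	u64 bytes = (u64)nr_sects << SECTOR_SHIFT;         // sectors -> bytes
 *
 * PAGE_SECTORS is simply the number of such sectors in one page, i.e.
 * PAGE_SIZE / SECTOR_SIZE.
 */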

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk		*bd_disk;
	struct request_queue	*bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8)	// read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void			*bd_claiming;
	void			*bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
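
/*
 * Illustrative example (the function name here is hypothetical): dev_to_bdev()
 * is the usual way back from the embedded struct device to its containing
 * block_device, e.g. in a struct device release callback:
 *
 *	static void example_bdev_release(struct device *dev)
 *	{
 *		struct block_device *bdev = dev_to_bdev(dev);
 *
 *		put_disk(bdev->bd_disk);
 *	}
 */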

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL		((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
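
/*
 * Illustrative use (a sketch, not code from this header): a multipath
 * driver's completion handler can use blk_path_error() to decide whether
 * retrying on another path may help:
 *
 *	if (blk_path_error(status))
 *		queue_request_on_alternate_path(rq);	// hypothetical helper
 *	else
 *		blk_mq_end_request(rq, status);		// hard error, fail upward
 */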

struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
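
/*
 * Note (editor's addition): bio_reset() in block/bio.c clears only the first
 * BIO_RESET_BYTES of the structure, roughly memset(bio, 0, BIO_RESET_BYTES),
 * which is why everything from bi_max_vecs onward survives a reset.
 */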
2827cc01581STejun Heo
2832b24e6f6SJohannes Thumshirn /*
284fd363244SDavid Howells * bio flags
2852b24e6f6SJohannes Thumshirn */
2862b24e6f6SJohannes Thumshirn enum {
2872b24e6f6SJohannes Thumshirn BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
2882b24e6f6SJohannes Thumshirn BIO_CLONED, /* doesn't own data */
2892b24e6f6SJohannes Thumshirn BIO_BOUNCED, /* bio is a bounce bio */
290320fb0f9SYu Kuai BIO_QUIET, /* Make BIO Quiet */
2918d2bbd4cSChristoph Hellwig BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
2922b24e6f6SJohannes Thumshirn BIO_REFFED, /* bio has elevated ->bi_cnt */
293fbbaf700SNeilBrown BIO_BPS_THROTTLED, /* This bio has already been subjected to
2940376e9efSChristoph Hellwig * throttling rules. Don't do it again. */
295aa1b46dcSTejun Heo BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
296aa1b46dcSTejun Heo * of this bio. */
29730c5d345SChristoph Hellwig BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
298dd291d77SDamien Le Moal BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
2999b1ce7f0SDamien Le Moal BIO_QOS_MERGED, /* but went through rq_qos merge path */
3002b24e6f6SJohannes Thumshirn BIO_REMAPPED,
3012b24e6f6SJohannes Thumshirn BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
302cd4a4ae4SJens Axboe BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
3039a95e4efSBart Van Assche BIO_FLAG_LAST
3049a95e4efSBart Van Assche };
305ff07a02eSBart Van Assche
306342a72a3SBart Van Assche typedef __u32 __bitwise blk_mq_req_flags_t;
307ff07a02eSBart Van Assche
308ff07a02eSBart Van Assche #define REQ_OP_BITS 8
309ff07a02eSBart Van Assche #define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
310ff07a02eSBart Van Assche #define REQ_FLAG_BITS 24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
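
/*
 * Illustrative example: a submitter builds bi_opf by OR-ing exactly one
 * REQ_OP_* value with any number of REQ_* flags, e.g. a synchronous write
 * that also flushes the write cache before and after the data:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
 *
 * The operation is recovered with bio_op() below, i.e. bi_opf & REQ_OP_MASK.
 */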

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
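
/*
 * For example, op_is_sync(REQ_OP_READ) and op_is_sync(REQ_OP_WRITE | REQ_FUA)
 * are true, while op_is_sync(REQ_OP_WRITE) without any of the flags above is
 * not.
 */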

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
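
/*
 * Illustrative sketch of how the return value is used: it indexes the
 * per-operation statistics arrays (the part_stat_* helpers live in
 * <linux/part_stat.h> and bio_sectors() in <linux/bio.h>):
 *
 *	const int sgrp = op_stat_group(bio_op(bio));
 *
 *	part_stat_inc(bio->bi_bdev, ios[sgrp]);
 *	part_stat_add(bio->bi_bdev, sectors[sgrp], bio_sectors(bio));
 */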

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */