/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus a trailing
 * NUL. EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is
 * not inserted. Shall not be set for devices which are removed entirely when
 * the media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};

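/*
 * Example (illustrative, not part of this header): a driver that never wants
 * the core to scan for or expose partitions sets GENHD_FL_NO_PART before the
 * disk is added:
 *
 *	disk->flags |= GENHD_FL_NO_PART;
 *	err = add_disk(disk);
 */
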
enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char				flags;
	enum blk_integrity_checksum		csum_type;
	unsigned char				tuple_size;
	unsigned char				pi_offset;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		zone_capacity;
	unsigned int		last_zone_capacity;
	unsigned long __rcu	*conv_zones_bitmap;
	unsigned int		zone_wplugs_hash_bits;
	spinlock_t		zone_wplugs_lock;
	struct mempool_s	*zone_wplugs_pool;
	struct hlist_head	*zone_wplugs_hash;
	struct workqueue_struct	*zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - return the number of openers for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

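/*
 * Example (illustrative sketch, names hypothetical): a driver that refuses
 * to change a disk while it is open. disk->open_mutex must be held for the
 * disk_openers() value to be stable:
 *
 *	static int my_resize(struct gendisk *disk, sector_t new_sectors)
 *	{
 *		int ret = 0;
 *
 *		mutex_lock(&disk->open_mutex);
 *		if (disk_openers(disk) > 0)
 *			ret = -EBUSY;
 *		else
 *			set_capacity(disk, new_sectors);
 *		mutex_unlock(&disk->open_mutex);
 *		return ret;
 *	}
 */
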
/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))

/* bounce all highmem pages */
#define BLK_FEAT_BOUNCE_HIGH		((__force blk_features_t)(1u << 14))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
	((__force blk_features_t)(1u << 15))

/* atomic writes enabled */
#define BLK_FEAT_ATOMIC_WRITES \
	((__force blk_features_t)(1u << 16))

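/*
 * Example (illustrative): a driver that honors FLUSH/FUA and wants regular
 * disk I/O accounting would typically advertise the corresponding features
 * in the queue_limits it passes to disk allocation:
 *
 *	struct queue_limits lim = {
 *		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
 *					  BLK_FEAT_IO_STAT,
 *		.logical_block_size	= 4096,
 *	};
 *
 * The limits are then handed to blk_alloc_disk() (bio-based drivers) or
 * blk_mq_alloc_disk() (blk-mq drivers).
 */
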
/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))

/* passthrough command IO accounting */
#define BLK_FLAG_IOSTATS_PASSTHROUGH	((__force blk_flags_t)(1u << 2))

struct queue_limits {
	blk_features_t		features;
	blk_flags_t		flags;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_user_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_hw_zone_append_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	/* atomic write limits */
	unsigned int		atomic_write_hw_max;
	unsigned int		atomic_write_max_sectors;
	unsigned int		atomic_write_hw_boundary;
	unsigned int		atomic_write_boundary_sectors;
	unsigned int		atomic_write_hw_unit_min;
	unsigned int		atomic_write_unit_min;
	unsigned int		atomic_write_hw_unit_max;
	unsigned int		atomic_write_unit_max;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned int		max_open_zones;
	unsigned int		max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
	unsigned int		dma_pad_mask;

	struct blk_integrity	integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);

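/*
 * Example (illustrative): a report_zones_cb that counts zones that are not
 * full. The callback is invoked once per reported zone; @data is the opaque
 * cookie passed to blkdev_report_zones(), and a non-zero return aborts the
 * report:
 *
 *	static int count_open_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				       void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		if (zone->cond != BLK_ZONE_COND_FULL)
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_open_zones_cb, &count);
 */
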
/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not
 * overlap and must include all sectors within the disk capacity (no sector
 * holes allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	struct elevator_queue	*elevator;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	unsigned int		rq_timeout;

	unsigned int		queue_depth;

	refcount_t		refs;

	/* hw dispatch queues */
	unsigned int		nr_hw_queues;
	struct xarray		hctx_table;

	struct percpu_ref	q_usage_counter;
	struct lock_class_key	io_lock_cls_key;
	struct lockdep_map	io_lockdep_map;

	struct lock_class_key	q_lock_cls_key;
	struct lockdep_map	q_lockdep_map;

	struct request		*last_merge;

	spinlock_t		queue_lock;

	int			quiesce_depth;

	struct gendisk		*disk;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

	struct queue_limits	limits;

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	int			node;

	spinlock_t		requeue_lock;
	struct list_head	requeue_list;
	struct delayed_work	requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	/*
	 * Protects against I/O scheduler switching, particularly when
	 * updating q->elevator. Since the elevator update code path may
	 * also modify q->nr_requests, this lock also protects the sysfs
	 * attribute nr_requests.
	 * To ensure proper locking order during an elevator update, first
	 * freeze the queue, then acquire ->elevator_lock.
	 */
	struct mutex		elevator_lock;

	struct mutex		sysfs_lock;
	struct mutex		limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
#ifdef CONFIG_LOCKDEP
	struct task_struct	*mq_freeze_owner;
	int			mq_freeze_owner_depth;
	/*
	 * Records disk & queue state in current context, used in unfreeze
	 * queue
	 */
	bool			mq_freeze_disk_dead;
	bool			mq_freeze_queue_dying;
#endif
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
	QUEUE_FLAG_DYING,		/* queue being torn down */
	QUEUE_FLAG_NOMERGES,		/* disable merge attempts */
	QUEUE_FLAG_SAME_COMP,		/* complete on same CPU-group */
	QUEUE_FLAG_FAIL_IO,		/* fake timeout */
	QUEUE_FLAG_NOXMERGES,		/* No extended merges */
	QUEUE_FLAG_SAME_FORCE,		/* force complete on same CPU */
	QUEUE_FLAG_INIT_DONE,		/* queue is initialized */
	QUEUE_FLAG_STATS,		/* track IO start and completion times */
	QUEUE_FLAG_REGISTERED,		/* queue has been registered to a disk */
	QUEUE_FLAG_QUIESCED,		/* queue has been quiesced */
	QUEUE_FLAG_RQ_ALLOC_TIME,	/* record rq->alloc_time_ns */
	QUEUE_FLAG_HCTX_ACTIVE,		/* at least one blk-mq hctx is active */
	QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
	QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q)	\
	((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return disk->nr_zones;
}
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return disk_nr_zones(bdev->bd_disk);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups,
				 struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

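/*
 * Example (illustrative): the usual add/remove pattern for a driver-owned
 * disk. add_disk() is __must_check; on failure the disk is released with
 * put_disk() rather than del_gendisk():
 *
 *	err = add_disk(disk);
 *	if (err) {
 *		put_disk(disk);
 *		return err;
 *	}
 *	...
 *	// on teardown, after a successful add_disk():
 *	del_gendisk(disk);
 *	put_disk(disk);
 */
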
static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev,
				  unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline bool get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline bool bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(lim, node_id, &__key);				\
})

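/*
 * Example (illustrative sketch): skeleton setup of a bio-based disk. Names
 * such as my_fops and my_dev are hypothetical:
 *
 *	struct queue_limits lim = { };
 *	struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;		// contains .submit_bio
 *	disk->private_data = my_dev;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "myblk0");
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 */
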
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q: queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}

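/*
 * Example (illustrative): the usual update pattern. The snapshot is modified
 * locally and then either committed, which drops limits_lock, or the update
 * is cancelled with queue_limits_cancel_update() (below) on an early error:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *
 *	if (!new_max_valid) {
 *		queue_limits_cancel_update(q);
 *		return -EINVAL;
 *	}
 *	lim.max_hw_sectors = new_max;
 *	return queue_limits_commit_update(q, &lim);
 */
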
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q: queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported. They are racy and the driver needs to
 * cope with that. Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
	q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
	struct request *head;
	struct request *tail;
};

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct rq_list mq_list;		/* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct rq_list cached_rqs;
	u64 cur_ktime;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list;	/* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

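/*
 * Example (illustrative): batching a series of submissions under one plug so
 * that sequential bios can be merged and dispatched together:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);	// flushes the plugged requests
 */
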
/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

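/*
 * Example (illustrative): zeroing a 1 MiB region starting at sector 2048,
 * allowing the block layer to fall back to writing explicit zeroes when the
 * device has no offloaded zeroing (i.e. no special flags):
 *
 *	int err = blkdev_issue_zeroout(bdev, 2048,
 *				       (1024 * 1024) >> SECTOR_SHIFT,
 *				       GFP_KERNEL, 0);
 *
 * Passing BLKDEV_ZERO_NOFALLBACK instead makes the call fail with
 * -EOPNOTSUPP when the device cannot zero without explicit writes.
 */
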
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}

static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

/*
 * Default upper limit for the software max_sectors limit used for
 * regular file system I/O. This can be increased through sysfs.
 *
 * Not to be confused with the max_hw_sectors limit that is entirely
 * controlled by the driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	2560u

static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
	return &bdev_get_queue(bdev)->limits;
}

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
	return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_zone_append_sectors;
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	return q->limits.logical_block_size;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline unsigned int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
		return true;
	return q->limits.features & BLK_FEAT_STABLE_WRITES;
}

static inline bool blk_queue_write_cache(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return blk_queue_write_cache(bdev_get_queue(bdev));
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

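/*
 * Example (illustrative): computing the zone containing a given sector and
 * the offset within it. The zone size is a power of two on zoned devices
 * managed by this layer, so masking works (this is exactly what
 * bdev_offset_from_zone_start() below does):
 *
 *	if (bdev_is_zoned(bdev)) {
 *		sector_t zone_sectors = bdev_zone_sectors(bdev);
 *		unsigned int zno = bdev_zone_no(bdev, sector);
 *		sector_t offset = sector & (zone_sectors - 1);
 *		...
 *	}
 */
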
static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
	return bdev_offset_from_zone_start(bio->bi_bdev,
					   bio->bi_iter.bi_sector);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

/**
 * bdev_zone_is_seq - check if a sector belongs to a sequential write zone
 * @bdev: block device to check
 * @sector: sector number
 *
 * Check if @sector on @bdev is contained in a sequential write required zone.
 */
static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
	bool is_seq = false;

#if IS_ENABLED(CONFIG_BLK_DEV_ZONED)
	if (bdev_is_zoned(bdev)) {
		struct gendisk *disk = bdev->bd_disk;
		unsigned long *bitmap;

		rcu_read_lock();
		bitmap = rcu_dereference(disk->conv_zones_bitmap);
		is_seq = !bitmap ||
			!test_bit(disk_zone_no(disk, sector), bitmap);
		rcu_read_unlock();
	}
#endif

	return is_seq;
}

int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);

static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
	return q->limits.dma_alignment;
}

static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_max;
}

static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_min;
}

static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}

static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
	return lim->dma_alignment | lim->dma_pad_mask;
}

static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
				  unsigned int len)
{
	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);

	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

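/*
 * Worked example: blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12, since order_base_2(size >> SECTOR_SHIFT) yields
 * the number of doublings above one sector.
 */
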
int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events)(struct gendisk *disk,
				     unsigned int clearing);
	void (*unlock_native_capacity)(struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

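/*
 * Example (illustrative): a minimal operations table for a simple bio-based
 * driver. Only .submit_bio and .owner are typically needed; my_submit_bio is
 * a hypothetical name:
 *
 *	static void my_submit_bio(struct bio *bio)
 *	{
 *		// process or remap the bio, then complete it
 *		bio_endio(bio);
 *	}
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *	};
 */
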
#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio: bio to end account for
 * @start_time: start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

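/*
 * Example (illustrative sketch, names hypothetical): a holder that wants to
 * be notified when the underlying device goes away, e.g. to fail over:
 *
 *	static void my_mark_dead(struct block_device *bdev, bool surprise)
 *	{
 *		struct my_target *t = my_holder_to_target(bdev);
 *
 *		my_fail_over(t);
 *	}
 *
 *	static const struct blk_holder_ops my_holder_ops = {
 *		.mark_dead = my_mark_dead,
 *	};
 *
 * The ops are passed, together with the holder cookie, to
 * bdev_file_open_by_dev()/bdev_file_open_by_path() below.
 */
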
/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(struct path *, struct kstat *, u32);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(struct path *path, struct kstat *stat,
		u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {
	struct rq_list req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
						struct queue_limits *limits)
{
	unsigned int alignment = max(limits->atomic_write_hw_unit_min,
				limits->atomic_write_hw_boundary);

	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
	struct request_queue *bd_queue = bdev->bd_queue;
	struct queue_limits *limits = &bd_queue->limits;

	if (!limits->atomic_write_unit_min)
		return false;

	if (bdev_is_partition(bdev))
		return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
							limits);

	return true;
}

static inline unsigned int
bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

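/*
 * Example (illustrative): opening a block device by path, probing its atomic
 * write limits, and closing it again. The holder cookie identifies the
 * claimant; hops may be NULL when no holder callbacks are needed:
 *
 *	struct file *f = bdev_file_open_by_path("/dev/sda",
 *						BLK_OPEN_READ | BLK_OPEN_WRITE,
 *						my_holder, NULL);
 *	struct block_device *bdev;
 *
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	bdev = file_bdev(f);
 *	if (bdev_can_atomic_write(bdev))
 *		max_bytes = bdev_atomic_write_unit_max_bytes(bdev);
 *	bdev_fput(f);
 */
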
#endif /* _LINUX_BLKDEV_H */