/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
				    struct block_device **bdev, fmode_t *mode);
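/*
 * Illustrative sketch (not part of this interface): a minimal bio-based
 * map function for a hypothetical pass-through target whose constructor
 * stored a single struct dm_dev in ti->private and which maps onto the
 * start of that device.  It uses dm_target_offset() and the DM_MAPIO_*
 * return values defined later in this header:
 *
 *	static int passthrough_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct dm_dev *dev = ti->private;
 *
 *		bio_set_dev(bio, dev->bdev);
 *		bio->bi_iter.bi_sector =
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */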
/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_from_iter_fn dax_copy_from_iter;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)
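/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * an iterate_devices callout answering "does at least one underlying
 * device support discard?".  Returning non-zero stops the iteration, so
 * ti->type->iterate_devices(ti, device_supports_discard, NULL) yields
 * true as soon as one such device is found:
 *
 *	static int device_supports_discard(struct dm_target *ti,
 *					   struct dm_dev *dev,
 *					   sector_t start, sector_t len,
 *					   void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && blk_queue_discard(q);
 *	}
 */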
/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is a responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard bios be split
	 * on max_io_len boundary.
	 */
	bool split_discard_bios:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};
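/*
 * Illustrative sketch (hypothetical target, not part of this interface):
 * a minimal constructor wiring the dm_target fields above together.  It
 * opens one underlying device with dm_get_device(), keeps it in
 * ti->private for the map function, and reports failures via ti->error:
 *
 *	static int passthrough_ctr(struct dm_target *ti,
 *				   unsigned argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &dev)) {
 *			ti->error = "Device lookup failed";
 *			return -EINVAL;
 *		}
 *		ti->num_flush_bios = 1;
 *		ti->private = dev;
 *		return 0;
 *	}
 *
 * The matching destructor would just call dm_put_device(ti, ti->private).
 */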
/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments.  Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);
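/*
 * Illustrative sketch (hypothetical bounds and names, not part of this
 * interface): parsing an optional feature-argument group from a
 * constructor's argc/argv with the helpers above:
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 16, "Invalid number of feature arguments"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_features;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *
 * Each feature argument can then be fetched with dm_shift_arg(&as).
 */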
/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
			  sector_t start);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
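/*
 * Illustrative sketch (error handling elided; "linear", md and len are
 * assumed example values, not part of this interface): the
 * create/add/complete table lifecycle described above, followed by the
 * swap a caller performs while the device is suspended:
 *
 *	struct dm_table *t, *old;
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	dm_table_add_target(t, "linear", 0, len, "/dev/sdb 0");
 *	dm_table_complete(t);
 *	old = dm_swap_table(md, t);
 *
 * The caller must itself destroy the previous table returned in old.
 */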
/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_RATELIMIT(pr_func, fmt, ...)					\
do {									\
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&rs))						\
		pr_func(DM_FMT(fmt), ##__VA_ARGS__);			\
} while (0)

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)

#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	3
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */