/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);
/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);
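
/*
 * Example (illustrative sketch, not part of this header): a minimal
 * bio-based map function for a hypothetical pass-through target that
 * stores a struct dm_dev pointer in ti->private.  It remaps the bio
 * to the underlying device (assuming the bio_set_dev() helper of
 * contemporary kernels) and reports a simple remap:
 *
 *	static int passthrough_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct dm_dev *dev = ti->private;
 *
 *		bio_set_dev(bio, dev->bdev);
 *		bio->bi_iter.bi_sector =
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */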

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g.,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);
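
/*
 * Example (illustrative sketch): the simplest possible end_io hook,
 * which accepts whatever status the lower layers produced:
 *
 *	static int passthrough_end_io(struct dm_target *ti, struct bio *bio,
 *				      blk_status_t *error)
 *	{
 *		return DM_ENDIO_DONE;
 *	}
 */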

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
			    struct block_device **bdev, fmode_t *mode);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of the device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
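
/*
 * Example (illustrative sketch): a callout that can be handed to a
 * target's iterate_devices hook; the iteration returns non-zero if
 * some underlying device does not advertise a writeback cache (i.e.
 * flush support), stopping at the first such device:
 *
 *	static int device_not_flush_capable(struct dm_target *ti,
 *					    struct dm_dev *dev,
 *					    sector_t start, sector_t len,
 *					    void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && !test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 *	}
 */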

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
		size_t size);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
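
/*
 * Example (illustrative sketch): a constructor for a hypothetical
 * single-device pass-through target.  It opens the destination device
 * named by argv[0] with dm_get_device() and stashes it in ti->private;
 * the matching dtr would release it with dm_put_device():
 *
 *	static int passthrough_ctr(struct dm_target *ti,
 *				   unsigned int argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &dev)) {
 *			ti->error = "Device lookup failed";
 *			return -EINVAL;
 *		}
 *		ti->private = dev;
 *		return 0;
 *	}
 */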

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_from_iter_fn dax_copy_from_iter;
	dm_dax_flush_fn dax_flush;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires discard bios to be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_bios:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	struct bio clone;
};

/*
 * Per-bio data (of per_io_data_size bytes) is allocated immediately in
 * front of the struct dm_target_io whose embedded clone bio is handed
 * to the target, so these helpers convert between the two with simple
 * pointer arithmetic.
 */
static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
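
/*
 * Example (illustrative sketch): a target that sets
 * ti->per_io_data_size = sizeof(struct per_bio_data) in its ctr can
 * recover its per-bio state in map/end_io.  Both struct per_bio_data
 * and example_map below are hypothetical:
 *
 *	struct per_bio_data {
 *		bool saved;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct per_bio_data *pb =
 *			dm_per_bio_data(bio, sizeof(struct per_bio_data));
 *		struct dm_dev *dev = ti->private;
 *
 *		pb->saved = true;
 *		bio_set_dev(bio, dev->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */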

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
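
/*
 * Example (illustrative sketch): registering a target type from module
 * init/exit, wiring up the hypothetical hooks sketched in the comments
 * above (a passthrough_dtr that calls dm_put_device() is assumed):
 *
 *	static struct target_type passthrough_target = {
 *		.name	 = "passthrough",
 *		.version = {1, 0, 0},
 *		.module	 = THIS_MODULE,
 *		.ctr	 = passthrough_ctr,
 *		.dtr	 = passthrough_dtr,
 *		.map	 = passthrough_map,
 *	};
 *
 *	static int __init dm_passthrough_init(void)
 *	{
 *		return dm_register_target(&passthrough_target);
 *	}
 *
 *	static void __exit dm_passthrough_exit(void)
 *	{
 *		dm_unregister_target(&passthrough_target);
 *	}
 *
 *	module_init(dm_passthrough_init);
 *	module_exit(dm_passthrough_exit);
 */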

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
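
/*
 * Example (illustrative sketch): parsing "<dev_path> <count>" style
 * ctr arguments with the helpers above; count_arg and its bounds are
 * hypothetical:
 *
 *	static struct dm_arg count_arg = {0, 16, "Invalid count"};
 *
 *	static int example_parse(struct dm_target *ti,
 *				 unsigned argc, char **argv)
 *	{
 *		struct dm_arg_set as = { .argc = argc, .argv = argv };
 *		const char *dev_path = dm_shift_arg(&as);
 *		unsigned count;
 *
 *		if (!dev_path) {
 *			ti->error = "No device path given";
 *			return -EINVAL;
 *		}
 *		if (dm_read_arg(&count_arg, &as, &count, &ti->error))
 *			return -EINVAL;
 *		return 0;
 *	}
 */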

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
			  sector_t start);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * A target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (those that support both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
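
/*
 * Example (illustrative sketch of the sequence above; in practice this
 * is driven by the device-mapper ioctl layer, not by target code):
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sdb 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 */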

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);
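
/*
 * Example (illustrative sketch): replacing the live table of a
 * suspended device, roughly as the ioctl layer does on a table
 * load + resume.  dm_table_destroy() is declared in the private
 * drivers/md/dm.h header, not here:
 *
 *	struct dm_table *old;
 *
 *	dm_suspend(md, 0);
 *	old = dm_swap_table(md, new_table);
 *	dm_resume(md);
 *	if (!IS_ERR_OR_NULL(old))
 *		dm_table_destroy(old);
 */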

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...)						\
do {									\
	if (dm_ratelimit())						\
		DMERR(fmt, ##__VA_ARGS__);				\
} while (0)

#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...)						\
do {									\
	if (dm_ratelimit())						\
		DMWARN(fmt, ##__VA_ARGS__);				\
} while (0)

#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...)						\
do {									\
	if (dm_ratelimit())						\
		DMINFO(fmt, ##__VA_ARGS__);				\
} while (0)

#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...)						\
do {									\
	if (dm_ratelimit())						\
		DMDEBUG(fmt, ##__VA_ARGS__);				\
} while (0)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
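
/*
 * Example (illustrative sketch): a status function that emits into the
 * caller-supplied buffer via DMEMIT, which expects local variables
 * named sz, result and maxlen to be in scope:
 *
 *	static void example_status(struct dm_target *ti,
 *				   status_type_t type, unsigned status_flags,
 *				   char *result, unsigned maxlen)
 *	{
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", "some_arg");
 *			break;
 *		}
 *	}
 */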

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	3
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
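
/* For example, dm_div_up(10, 4) == 3 and dm_round_up(10, 4) == 12. */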

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */