/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);
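
/*
 * Illustrative ctr sketch (the "example_*" names and struct are
 * hypothetical, not part of this API): a typical constructor parses
 * its arguments, opens the underlying device with dm_get_device()
 * and stores per-instance state in ti->private:
 *
 *	static int example_ctr(struct dm_target *ti,
 *			       unsigned int argc, char **argv)
 *	{
 *		struct example_ctx *ctx;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ctx->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ctx);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ctx;
 *		return 0;
 *	}
 */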

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);
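
/*
 * Illustrative bio map sketch (hypothetical "example_*" names): a
 * simple remapping target redirects the bio to its underlying device,
 * offset with dm_target_offset() (defined below), and returns
 * DM_MAPIO_REMAPPED so that dm core resubmits it:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ctx = ti->private;
 *
 *		bio->bi_bdev = ctx->dev->bdev;
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = ctx->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */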

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
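
/*
 * Illustrative callout sketch, mirroring how dm core combines device
 * properties: report whether one contiguous section sits on a
 * non-rotational device (hypothetical name):
 *
 *	static int example_dev_is_nonrot(struct dm_target *ti,
 *					 struct dm_dev *dev,
 *					 sector_t start, sector_t len,
 *					 void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && blk_queue_nonrot(q);
 *	}
 *
 * An iterate_devices implementation calls the callout once per
 * contiguous section; any non-zero return stops the iteration.
 */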

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The minimum number of extra bytes allocated in each bio for the
	 * target to use.  dm_per_bio_data returns the data location.
	 */
	unsigned per_bio_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard bios be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
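
/*
 * Illustrative per-bio-data sketch (hypothetical struct and names):
 * the ctr reserves space with ti->per_bio_data_size and the map
 * function recovers the region allocated in front of the clone bio:
 *
 *	struct example_per_bio {
 *		sector_t orig_sector;
 *	};
 *
 *	In the ctr:
 *		ti->per_bio_data_size = sizeof(struct example_per_bio);
 *
 *	In the map function:
 *		struct example_per_bio *pb =
 *			dm_per_bio_data(bio, sizeof(struct example_per_bio));
 *		pb->orig_sector = bio->bi_iter.bi_sector;
 */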

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
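
/*
 * Illustrative registration sketch (hypothetical "example" target):
 * a target module fills in a target_type and registers it from its
 * module init hook:
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */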

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
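
/*
 * Illustrative parsing sketch (hypothetical argument names and
 * bounds), as used from inside a ctr: validate one numeric argument
 * with dm_read_arg(), then take a positional one with dm_shift_arg():
 *
 *	static struct dm_arg _args[] = {
 *		{0, 1024, "Invalid number of widgets"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_widgets;
 *	const char *dev_path;
 *
 *	if (dm_read_arg(_args, &as, &num_widgets, &ti->error))
 *		return -EINVAL;
 *
 *	dev_path = dm_shift_arg(&as);
 *	if (!dev_path) {
 *		ti->error = "Missing device path";
 *		return -EINVAL;
 *	}
 */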

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
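
/*
 * Illustrative use from a ctr (hypothetical chunk_size): a target
 * that must not see I/O spanning a chunk boundary sets the limit and
 * propagates any failure:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size);
 *	if (r)
 *		return r;
 */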

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
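
/*
 * Illustrative access pattern: dm_get_live_table() takes an SRCU read
 * lock that must be released with dm_put_live_table() using the same
 * index, even if no table is currently live:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect the table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */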

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
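
/*
 * Illustrative status sketch (hypothetical "example_*" names): DMEMIT
 * expects "result", "maxlen" and "sz" in scope, matching the
 * dm_status_fn arguments:
 *
 *	static void example_status(struct dm_target *ti,
 *				   status_type_t type,
 *				   unsigned status_flags,
 *				   char *result, unsigned maxlen)
 *	{
 *		struct example_ctx *ctx = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", ctx->dev->name);
 *			break;
 *		}
 *	}
 */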

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
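
/*
 * Illustrative arithmetic: for a target that begins at sector "start"
 * (hypothetical) on its underlying device, a remap computes
 *
 *	bio->bi_iter.bi_sector = start +
 *		dm_target_offset(ti, bio->bi_iter.bi_sector);
 */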

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */