/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

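/*
 * Illustrative sketch (not part of this interface): a minimal constructor
 * for a hypothetical "example" target that takes a single underlying
 * device path.  The names example_ctr and struct example_ctx are
 * placeholders.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 */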
/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);

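/*
 * Illustrative sketch: a "simple remap" map method in the style of the
 * linear target, reusing the hypothetical struct example_ctx above and
 * assuming a kernel where the bio sector lives in bio->bi_iter.bi_sector.
 * The bio is redirected to the underlying device and dm core submits it.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */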
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g.,
 *       a multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

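/*
 * Illustrative sketch: an end_io method that asks dm core to push back
 * failed io rather than completing it with an error.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error)
 *	{
 *		if (error)
 *			return DM_ENDIO_REQUEUE;	// retry later
 *		return 0;				// ended successfully
 *	}
 */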
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of the device used by
 * the target until it encounters a non-zero return code, which it then
 * returns.  Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

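/*
 * Illustrative sketch: answering "does every underlying device support
 * discard?" with a callout that returns non-zero (stopping the
 * iteration) as soon as one device does not.
 *
 *	static int device_not_discard_capable(struct dm_target *ti,
 *					      struct dm_dev *dev,
 *					      sector_t start, sector_t len,
 *					      void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return !q || !blk_queue_discard(q);
 *	}
 *
 *	// all devices support discard iff this returns zero:
 *	ti->type->iterate_devices(ti, device_not_discard_capable, NULL);
 */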
typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The minimum number of extra bytes allocated in each bio for the
	 * target to use.  dm_per_bio_data returns the data location.
	 */
	unsigned per_bio_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if this target requires discard bios to be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}

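/*
 * Illustrative sketch: a target reserves per-bio space by setting
 * ti->per_bio_data_size in its constructor and recovers it later with
 * dm_per_bio_data (struct example_per_bio is a placeholder).
 *
 *	struct example_per_bio {
 *		unsigned long start_jiffies;
 *	};
 *
 *	// in the constructor:
 *	ti->per_bio_data_size = sizeof(struct example_per_bio);
 *
 *	// in the map method:
 *	struct example_per_bio *pb =
 *		dm_per_bio_data(bio, sizeof(struct example_per_bio));
 *	pb->start_jiffies = jiffies;
 */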
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

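/*
 * Illustrative sketch: defining a target type and registering it from
 * module init/exit.  The example_* methods are the placeholders sketched
 * above (example_dtr assumed analogous); a real target would also define
 * DM_MSG_PREFIX for the DM* logging macros.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 */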
/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);

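/*
 * Illustrative sketch: a constructor parsing "<dev> <#feature_args>
 * <feature_args...>" with the helpers above, reporting failures through
 * ti->error (the bounds and message are placeholders).
 *
 *	static struct dm_arg _args[] = {
 *		{0, 16, "Invalid number of feature arguments"},
 *	};
 *
 *	struct dm_arg_set as;
 *	const char *dev_path;
 *	unsigned num_features;
 *	int r;
 *
 *	as.argc = argc;
 *	as.argv = argv;
 *
 *	dev_path = dm_shift_arg(&as);
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 */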
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

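/*
 * Illustrative sketch of that calling sequence, as used by dm core (the
 * device path, length and parameter string are placeholders):
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sdb 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, 1024, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 */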
/*
 * A target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

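/*
 * Illustrative sketch of the reference pattern:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// the table cannot be freed while the reference is held
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */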
/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

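/*
 * Each of the message macros below expects the including file to define
 * DM_MSG_PREFIX before use, e.g.:
 *
 *	#define DM_MSG_PREFIX "example"
 *	...
 *	DMWARN("device %s not ready", dm_device_name(md));
 */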
#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

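/*
 * Illustrative sketch: a status method built on DMEMIT, which relies on
 * local variables named sz, result and maxlen being in scope (struct
 * example_ctx is the placeholder context sketched earlier).
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", ec->dev->name);
 *			break;
 *		}
 *	}
 */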
#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y) ( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

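/*
 * For example, dm_div_up(10, 4) == 3 and dm_round_up(10, 4) == 12.
 */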
#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

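/*
 * E.g. with ti->begin == 1024, a bio arriving at device sector 1040 is
 * at offset 16 within the target.
 */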
static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helpers for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */