xref: /linux-6.15/include/linux/device-mapper.h (revision b595076a)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the LGPL.
6  */
7 
8 #ifndef _LINUX_DEVICE_MAPPER_H
9 #define _LINUX_DEVICE_MAPPER_H
10 
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 
14 struct dm_dev;
15 struct dm_target;
16 struct dm_table;
17 struct mapped_device;
18 struct bio_vec;
19 
20 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
21 
22 union map_info {
23 	void *ptr;
24 	unsigned long long ll;
25 	unsigned target_request_nr;
26 };
27 
28 /*
29  * In the constructor the target parameter will already have the
30  * table, type, begin and len fields filled in.
31  */
32 typedef int (*dm_ctr_fn) (struct dm_target *target,
33 			  unsigned int argc, char **argv);
34 
35 /*
36  * The destructor doesn't need to free the dm_target, just
37  * anything hidden ti->private.
38  */
39 typedef void (*dm_dtr_fn) (struct dm_target *ti);
40 
41 /*
42  * The map function must return:
43  * < 0: error
44  * = 0: The target will handle the io by resubmitting it later
45  * = 1: simple remap complete
46  * = 2: The target wants to push back the io
47  */
48 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
49 			  union map_info *map_context);
50 typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
51 				  union map_info *map_context);
52 
53 /*
54  * Returns:
55  * < 0 : error (currently ignored)
56  * 0   : ended successfully
57  * 1   : for some reason the io has still not completed (eg,
58  *       multipath target might want to requeue a failed io).
59  * 2   : The target wants to push back the io
60  */
61 typedef int (*dm_endio_fn) (struct dm_target *ti,
62 			    struct bio *bio, int error,
63 			    union map_info *map_context);
64 typedef int (*dm_request_endio_fn) (struct dm_target *ti,
65 				    struct request *clone, int error,
66 				    union map_info *map_context);
67 
68 typedef void (*dm_flush_fn) (struct dm_target *ti);
69 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
70 typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
71 typedef int (*dm_preresume_fn) (struct dm_target *ti);
72 typedef void (*dm_resume_fn) (struct dm_target *ti);
73 
74 typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
75 			     char *result, unsigned int maxlen);
76 
77 typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
78 
79 typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
80 			    unsigned long arg);
81 
82 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
83 			    struct bio_vec *biovec, int max_size);
84 
85 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
86 					   struct dm_dev *dev,
87 					   sector_t start, sector_t len,
88 					   void *data);
89 
90 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
91 				      iterate_devices_callout_fn fn,
92 				      void *data);
93 
94 typedef void (*dm_io_hints_fn) (struct dm_target *ti,
95 				struct queue_limits *limits);
96 
97 /*
98  * Returns:
99  *    0: The target can handle the next I/O immediately.
100  *    1: The target can't handle the next I/O immediately.
101  */
102 typedef int (*dm_busy_fn) (struct dm_target *ti);
103 
104 void dm_error(const char *message);
105 
106 /*
107  * Combine device limits.
108  */
109 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
110 			 sector_t start, sector_t len, void *data);
111 
112 struct dm_dev {
113 	struct block_device *bdev;
114 	fmode_t mode;
115 	char name[16];
116 };
117 
118 /*
119  * Constructors should call these functions to ensure destination devices
120  * are opened/closed correctly.
121  */
122 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
123 						 struct dm_dev **result);
124 void dm_put_device(struct dm_target *ti, struct dm_dev *d);
125 
126 /*
127  * Information about a target type
128  */
129 
130 /*
131  * Target features
132  */
133 
134 struct target_type {
135 	uint64_t features;
136 	const char *name;
137 	struct module *module;
138 	unsigned version[3];
139 	dm_ctr_fn ctr;
140 	dm_dtr_fn dtr;
141 	dm_map_fn map;
142 	dm_map_request_fn map_rq;
143 	dm_endio_fn end_io;
144 	dm_request_endio_fn rq_end_io;
145 	dm_flush_fn flush;
146 	dm_presuspend_fn presuspend;
147 	dm_postsuspend_fn postsuspend;
148 	dm_preresume_fn preresume;
149 	dm_resume_fn resume;
150 	dm_status_fn status;
151 	dm_message_fn message;
152 	dm_ioctl_fn ioctl;
153 	dm_merge_fn merge;
154 	dm_busy_fn busy;
155 	dm_iterate_devices_fn iterate_devices;
156 	dm_io_hints_fn io_hints;
157 
158 	/* For internal device-mapper use. */
159 	struct list_head list;
160 };
161 
162 struct dm_target {
163 	struct dm_table *table;
164 	struct target_type *type;
165 
166 	/* target limits */
167 	sector_t begin;
168 	sector_t len;
169 
170 	/* Always a power of 2 */
171 	sector_t split_io;
172 
173 	/*
174 	 * A number of zero-length barrier requests that will be submitted
175 	 * to the target for the purpose of flushing cache.
176 	 *
177 	 * The request number will be placed in union map_info->target_request_nr.
178 	 * It is a responsibility of the target driver to remap these requests
179 	 * to the real underlying devices.
180 	 */
181 	unsigned num_flush_requests;
182 
183 	/*
184 	 * The number of discard requests that will be submitted to the
185 	 * target.  map_info->request_nr is used just like num_flush_requests.
186 	 */
187 	unsigned num_discard_requests;
188 
189 	/* target specific data */
190 	void *private;
191 
192 	/* Used to provide an error string from the ctr */
193 	char *error;
194 };
195 
196 int dm_register_target(struct target_type *t);
197 void dm_unregister_target(struct target_type *t);
198 
199 /*-----------------------------------------------------------------
200  * Functions for creating and manipulating mapped devices.
201  * Drop the reference with dm_put when you finish with the object.
202  *---------------------------------------------------------------*/
203 
204 /*
205  * DM_ANY_MINOR chooses the next available minor number.
206  */
207 #define DM_ANY_MINOR (-1)
208 int dm_create(int minor, struct mapped_device **md);
209 
210 /*
211  * Reference counting for md.
212  */
213 struct mapped_device *dm_get_md(dev_t dev);
214 void dm_get(struct mapped_device *md);
215 void dm_put(struct mapped_device *md);
216 
217 /*
218  * An arbitrary pointer may be stored alongside a mapped device.
219  */
220 void dm_set_mdptr(struct mapped_device *md, void *ptr);
221 void *dm_get_mdptr(struct mapped_device *md);
222 
223 /*
224  * A device can still be used while suspended, but I/O is deferred.
225  */
226 int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
227 int dm_resume(struct mapped_device *md);
228 
229 /*
230  * Event functions.
231  */
232 uint32_t dm_get_event_nr(struct mapped_device *md);
233 int dm_wait_event(struct mapped_device *md, int event_nr);
234 uint32_t dm_next_uevent_seq(struct mapped_device *md);
235 void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
236 
237 /*
238  * Info functions.
239  */
240 const char *dm_device_name(struct mapped_device *md);
241 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
242 struct gendisk *dm_disk(struct mapped_device *md);
243 int dm_suspended(struct dm_target *ti);
244 int dm_noflush_suspending(struct dm_target *ti);
245 union map_info *dm_get_mapinfo(struct bio *bio);
246 union map_info *dm_get_rq_mapinfo(struct request *rq);
247 
248 /*
249  * Geometry functions.
250  */
251 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
252 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
253 
254 
255 /*-----------------------------------------------------------------
256  * Functions for manipulating device-mapper tables.
257  *---------------------------------------------------------------*/
258 
259 /*
260  * First create an empty table.
261  */
262 int dm_table_create(struct dm_table **result, fmode_t mode,
263 		    unsigned num_targets, struct mapped_device *md);
264 
265 /*
266  * Then call this once for each target.
267  */
268 int dm_table_add_target(struct dm_table *t, const char *type,
269 			sector_t start, sector_t len, char *params);
270 
271 /*
272  * Finally call this to make the table ready for use.
273  */
274 int dm_table_complete(struct dm_table *t);
275 
276 /*
277  * Unplug all devices in a table.
278  */
279 void dm_table_unplug_all(struct dm_table *t);
280 
281 /*
282  * Table reference counting.
283  */
284 struct dm_table *dm_get_live_table(struct mapped_device *md);
285 void dm_table_get(struct dm_table *t);
286 void dm_table_put(struct dm_table *t);
287 
288 /*
289  * Queries
290  */
291 sector_t dm_table_get_size(struct dm_table *t);
292 unsigned int dm_table_get_num_targets(struct dm_table *t);
293 fmode_t dm_table_get_mode(struct dm_table *t);
294 struct mapped_device *dm_table_get_md(struct dm_table *t);
295 
296 /*
297  * Trigger an event.
298  */
299 void dm_table_event(struct dm_table *t);
300 
301 /*
302  * The device must be suspended before calling this method.
303  * Returns the previous table, which the caller must destroy.
304  */
305 struct dm_table *dm_swap_table(struct mapped_device *md,
306 			       struct dm_table *t);
307 
308 /*
309  * A wrapper around vmalloc.
310  */
311 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
312 
313 /*-----------------------------------------------------------------
314  * Macros.
315  *---------------------------------------------------------------*/
316 #define DM_NAME "device-mapper"
317 
318 #define DMCRIT(f, arg...) \
319 	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
320 
321 #define DMERR(f, arg...) \
322 	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
323 #define DMERR_LIMIT(f, arg...) \
324 	do { \
325 		if (printk_ratelimit())	\
326 			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
327 			       f "\n", ## arg); \
328 	} while (0)
329 
330 #define DMWARN(f, arg...) \
331 	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
332 #define DMWARN_LIMIT(f, arg...) \
333 	do { \
334 		if (printk_ratelimit())	\
335 			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
336 			       f "\n", ## arg); \
337 	} while (0)
338 
339 #define DMINFO(f, arg...) \
340 	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
341 #define DMINFO_LIMIT(f, arg...) \
342 	do { \
343 		if (printk_ratelimit())	\
344 			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
345 			       "\n", ## arg); \
346 	} while (0)
347 
348 #ifdef CONFIG_DM_DEBUG
349 #  define DMDEBUG(f, arg...) \
350 	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
351 #  define DMDEBUG_LIMIT(f, arg...) \
352 	do { \
353 		if (printk_ratelimit())	\
354 			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
355 			       "\n", ## arg); \
356 	} while (0)
357 #else
358 #  define DMDEBUG(f, arg...) do {} while (0)
359 #  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
360 #endif
361 
362 #define DMEMIT(x...) sz += ((sz >= maxlen) ? \
363 			  0 : scnprintf(result + sz, maxlen - sz, x))
364 
365 #define SECTOR_SHIFT 9
366 
367 /*
368  * Definitions of return values from target end_io function.
369  */
370 #define DM_ENDIO_INCOMPLETE	1
371 #define DM_ENDIO_REQUEUE	2
372 
373 /*
374  * Definitions of return values from target map function.
375  */
376 #define DM_MAPIO_SUBMITTED	0
377 #define DM_MAPIO_REMAPPED	1
378 #define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
379 
380 /*
381  * Ceiling(n / sz)
382  */
383 #define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
384 
385 #define dm_sector_div_up(n, sz) ( \
386 { \
387 	sector_t _r = ((n) + (sz) - 1); \
388 	sector_div(_r, (sz)); \
389 	_r; \
390 } \
391 )
392 
393 /*
394  * ceiling(n / size) * size
395  */
396 #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
397 
398 #define dm_array_too_big(fixed, obj, num) \
399 	((num) > (UINT_MAX - (fixed)) / (obj))
400 
401 /*
402  * Sector offset taken relative to the start of the target instead of
403  * relative to the start of the device.
404  */
405 #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
406 
407 static inline sector_t to_sector(unsigned long n)
408 {
409 	return (n >> SECTOR_SHIFT);
410 }
411 
412 static inline unsigned long to_bytes(sector_t n)
413 {
414 	return (n << SECTOR_SHIFT);
415 }
416 
417 /*-----------------------------------------------------------------
418  * Helper for block layer and dm core operations
419  *---------------------------------------------------------------*/
420 void dm_dispatch_request(struct request *rq);
421 void dm_requeue_unmapped_request(struct request *rq);
422 void dm_kill_unmapped_request(struct request *rq, int error);
423 int dm_underlying_device_busy(struct request_queue *q);
424 
425 #endif	/* _LINUX_DEVICE_MAPPER_H */
426