/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned flush_request;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
			  union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error,
			    union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			     char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);
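
/*
 * Illustrative sketch (not part of this interface): a minimal bio-based
 * map callback for a hypothetical linear-style target.  It redirects the
 * bio to the destination device recorded in the target's private context
 * and offsets the sector by the target's position in the table.  The
 * names example_map and struct example_c are invented for this example;
 * the DM_MAPIO_* return codes are defined later in this header.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_sector = ec->start + (bio->bi_sector - ti->begin);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */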
void dm_error(const char *message);

/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

/*
 * Target features
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_flush_fn flush;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* Always a power of 2 */
	sector_t split_io;

	/*
	 * The number of zero-length barrier requests that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The request number will be placed in union map_info->flush_request.
	 * It is the responsibility of the target driver to remap these
	 * requests to the real underlying devices.
	 */
	unsigned num_flush_requests;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;
};

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
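
/*
 * Illustrative sketch (hypothetical names, error handling abbreviated):
 * a constructor typically opens its destination device with
 * dm_get_device(), the destructor releases it with dm_put_device(), and
 * the filled-in target_type is registered from module init and
 * unregistered from module exit.  struct example_c, example_ctr,
 * example_dtr and example_map (see the map sketch above) are invented
 * for this example.
 *
 *	static int example_ctr(struct dm_target *ti,
 *			       unsigned int argc, char **argv)
 *	{
 *		struct example_c *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 * Module init then calls dm_register_target(&example_target), failing if
 * it returns a negative errno, and module exit calls
 * dm_unregister_target(&example_target).
 */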
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Unplug all devices in a table.
 */
void dm_table_unplug_all(struct dm_table *t);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
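
/*
 * Illustrative sketch (error handling abbreviated): the table calls
 * above are used in sequence when loading a new table for a mapped
 * device md.  The "linear" target takes "<destination dev> <sector>" as
 * its parameter string; dev_size is assumed to be the device length in
 * sectors, and cleanup of a partially built table is elided here.
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, dev_size, "/dev/sdb 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *
 * The completed table is then swapped in with dm_swap_table() while the
 * device is suspended; the previous table it returns must be destroyed
 * by the caller.
 */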
/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helpers for block layer and dm core operations.
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */
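
/*
 * Illustrative sketch (hypothetical names): DMEMIT() above is intended
 * for use inside a dm_status_fn, where it relies on the variables sz,
 * result and maxlen being in scope.  struct example_c and
 * example_status are invented for this example.
 *
 *	static int example_status(struct dm_target *ti, status_type_t type,
 *				  char *result, unsigned int maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */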