/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned target_request_nr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
			  union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (e.g.,
 *       the multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error,
			    union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			     char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);
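
/*
 * Example: a minimal bio-based target honouring the dm_ctr_fn, dm_dtr_fn
 * and dm_map_fn contracts above (closely following what a linear-style
 * target does).  This is an illustrative sketch only: the target context
 * struct example_c and the example_* function names are hypothetical and
 * not part of this interface.  dm_get_device(), dm_table_get_mode(),
 * dm_target_offset() and DM_MAPIO_REMAPPED are declared/defined later in
 * this header.
 *
 *	struct example_c {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_c *ec;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (sscanf(argv[1], "%llu", &start) != 1 ||
 *		    dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ec->start = (sector_t) start;
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_sector = ec->start +
 *				 dm_target_offset(ti, bio->bi_sector);
 *		return DM_MAPIO_REMAPPED;	(simple remap complete)
 *	}
 */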
108 */ 109 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, 110 sector_t start, sector_t len, void *data); 111 112 struct dm_dev { 113 struct block_device *bdev; 114 fmode_t mode; 115 char name[16]; 116 }; 117 118 /* 119 * Constructors should call these functions to ensure destination devices 120 * are opened/closed correctly. 121 */ 122 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, 123 struct dm_dev **result); 124 void dm_put_device(struct dm_target *ti, struct dm_dev *d); 125 126 /* 127 * Information about a target type 128 */ 129 130 /* 131 * Target features 132 */ 133 134 struct target_type { 135 uint64_t features; 136 const char *name; 137 struct module *module; 138 unsigned version[3]; 139 dm_ctr_fn ctr; 140 dm_dtr_fn dtr; 141 dm_map_fn map; 142 dm_map_request_fn map_rq; 143 dm_endio_fn end_io; 144 dm_request_endio_fn rq_end_io; 145 dm_flush_fn flush; 146 dm_presuspend_fn presuspend; 147 dm_postsuspend_fn postsuspend; 148 dm_preresume_fn preresume; 149 dm_resume_fn resume; 150 dm_status_fn status; 151 dm_message_fn message; 152 dm_ioctl_fn ioctl; 153 dm_merge_fn merge; 154 dm_busy_fn busy; 155 dm_iterate_devices_fn iterate_devices; 156 dm_io_hints_fn io_hints; 157 158 /* For internal device-mapper use. */ 159 struct list_head list; 160 }; 161 162 struct dm_target { 163 struct dm_table *table; 164 struct target_type *type; 165 166 /* target limits */ 167 sector_t begin; 168 sector_t len; 169 170 /* Always a power of 2 */ 171 sector_t split_io; 172 173 /* 174 * A number of zero-length barrier requests that will be submitted 175 * to the target for the purpose of flushing cache. 176 * 177 * The request number will be placed in union map_info->target_request_nr. 178 * It is a responsibility of the target driver to remap these requests 179 * to the real underlying devices. 180 */ 181 unsigned num_flush_requests; 182 183 /* 184 * The number of discard requests that will be submitted to the 185 * target. map_info->request_nr is used just like num_flush_requests. 186 */ 187 unsigned num_discard_requests; 188 189 /* target specific data */ 190 void *private; 191 192 /* Used to provide an error string from the ctr */ 193 char *error; 194 }; 195 196 /* Each target can link one of these into the table */ 197 struct dm_target_callbacks { 198 struct list_head list; 199 int (*congested_fn) (struct dm_target_callbacks *, int); 200 void (*unplug_fn)(struct dm_target_callbacks *); 201 }; 202 203 int dm_register_target(struct target_type *t); 204 void dm_unregister_target(struct target_type *t); 205 206 /*----------------------------------------------------------------- 207 * Functions for creating and manipulating mapped devices. 208 * Drop the reference with dm_put when you finish with the object. 209 *---------------------------------------------------------------*/ 210 211 /* 212 * DM_ANY_MINOR chooses the next available minor number. 213 */ 214 #define DM_ANY_MINOR (-1) 215 int dm_create(int minor, struct mapped_device **md); 216 217 /* 218 * Reference counting for md. 219 */ 220 struct mapped_device *dm_get_md(dev_t dev); 221 void dm_get(struct mapped_device *md); 222 void dm_put(struct mapped_device *md); 223 224 /* 225 * An arbitrary pointer may be stored alongside a mapped device. 226 */ 227 void dm_set_mdptr(struct mapped_device *md, void *ptr); 228 void *dm_get_mdptr(struct mapped_device *md); 229 230 /* 231 * A device can still be used while suspended, but I/O is deferred. 
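
/*
 * Example: registering the target type sketched earlier via
 * dm_register_target()/dm_unregister_target().  A minimal, hypothetical
 * module skeleton; it assumes the module #defines DM_MSG_PREFIX before
 * including this header, since DMERR() (defined below) expands it.
 *
 *	static struct target_type example_target = {
 *		.name	 = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr	 = example_ctr,
 *		.dtr	 = example_dtr,
 *		.map	 = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		int r = dm_register_target(&example_target);
 *
 *		if (r < 0)
 *			DMERR("register failed %d", r);
 *		return r;
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */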

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t,
				   struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this function.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
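
/*
 * Example: the table lifecycle described above (create, add targets,
 * complete, swap), as seen from in-kernel code that loads a new map
 * into an existing mapped device.  A minimal sketch: error handling is
 * abbreviated, the "linear" parameter string is illustrative, and since
 * the table destruction interface is not part of this header, disposal
 * of the previous table is left as a placeholder.
 *
 *	struct dm_table *t, *old;
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, 1024, "/dev/sdb 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		goto bad;			(destroy 't' here)
 *
 *	dm_suspend(md, 0);
 *	old = dm_swap_table(md, t);		(may return an ERR_PTR)
 *	dm_resume(md);
 *	if (!IS_ERR(old))
 *		;				(destroy 'old' here)
 */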
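
/*
 * Example: a target status method built on the DMEMIT() helper defined
 * below.  DMEMIT() appends to 'result' while respecting 'maxlen' and
 * expects local variables named sz, result and maxlen to be in scope.
 * The names again follow the hypothetical target sketched earlier in
 * this header.
 *
 *	static int example_status(struct dm_target *ti, status_type_t type,
 *				  char *result, unsigned int maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long) ec->start);
 *			break;
 *		}
 *		return 0;
 *	}
 */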

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */