// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int size;
	struct dma_page *page;
	struct dma_pool *pool;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		size_t blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
				      pool->name, blocks,
				      (size_t) pages *
				      (pool->allocation / pool->size),
				      pool->size, pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.
 * The actual size of blocks allocated may be larger than requested
 * because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
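	 *
	 * The two locks nest: pools_reg_lock is always taken before
	 * pools_lock, both here and in dma_pool_destroy().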
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	unsigned int offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
				__func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
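 *
 * A minimal usage sketch of the alloc/free pair (illustrative only; the
 * device pointer "mydev" and the block geometry are hypothetical, not
 * taken from this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	pool = dma_pool_create("descriptors", mydev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu) {
 *		... hand cpu (CPU view) and dma (device view) to the hardware ...
 *		dma_pool_free(pool, cpu, dma);
 *	}
 *	dma_pool_destroy(pool);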
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			dev_err(pool->dev, "%s %s, dma %pad already free\n",
				__func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
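
/*
 * Illustrative sketch of the managed variant above (not part of this file;
 * "foo_probe" and the "foo-desc" pool are hypothetical driver names).  The
 * devres entry added by dmam_pool_create() calls dma_pool_destroy() for the
 * driver when it detaches:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", &pdev->dev, 32, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *		return 0;	// pool freed automatically on detach
 *	}
 */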