/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory and uncached memory.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <[email protected]>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set the bits in @mask_to_set at @addr. Fails with -EBUSY,
 * without retrying, if any of the requested bits is already set;
 * otherwise the cmpxchg loop retries until the update wins.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Atomically clear the bits in @mask_to_clear at @addr. Fails with
 * -EBUSY, without retrying, if any of the requested bits is already
 * clear; otherwise the cmpxchg loop retries until the update wins.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
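
/*
 * Editor's illustration (not part of the original file): the same
 * cmpxchg retry pattern used by set_bits_ll()/clear_bits_ll() above,
 * applied to a hypothetical saturating counter. It is shown only to
 * make the lockless update loop concrete; example_inc_saturating() is
 * a made-up helper, not a genalloc interface.
 */
static int __maybe_unused example_inc_saturating(unsigned long *addr,
						 unsigned long limit)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val >= limit)
			return -EBUSY;	/* saturated: fail, don't retry */
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val + 1)) != val);

	return 0;
}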

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race on the same bit, the loser gets back the number of bits
 * that remain to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race on the same bit, the loser gets back the number of bits
 * that remain to be cleared; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
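
/*
 * Editor's illustration (not part of the original file): creating a pool
 * over a hypothetical on-chip SRAM block. gen_pool_add() is the
 * <linux/genalloc.h> wrapper around gen_pool_add_virt() below for chunks
 * without a separate physical address; the order of 5 (32-byte granules)
 * is an arbitrary choice for the example.
 */
static struct gen_pool *__maybe_unused example_pool_setup(unsigned long virt,
							  size_t size, int nid)
{
	struct gen_pool *pool;

	pool = gen_pool_create(5, nid);		/* 2^5 = 32-byte granules */
	if (!pool)
		return NULL;

	if (gen_pool_add(pool, virt, size, nid)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}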

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);
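
/*
 * Editor's illustration (not part of the original file): a typical
 * allocate/use/free round trip against a pool such as the one set up in
 * the sketch above. The 128-byte size is arbitrary.
 */
static void __maybe_unused example_alloc_free(struct gen_pool *pool)
{
	unsigned long vaddr;

	vaddr = gen_pool_alloc(pool, 128);
	if (!vaddr)
		return;		/* no free region large enough */

	/* ... use the 128 bytes at vaddr ... */

	gen_pool_free(pool, vaddr, 128);
}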

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the given @algo function to find room for the allocation.
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	/* round the request up to a whole number of allocation granules */
	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/* lost a race: undo the partial set and retry */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
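
/*
 * Editor's illustration (not part of the original file): passing an
 * explicit algorithm and its data through gen_pool_alloc_algo(). Here a
 * one-off 64-byte-aligned allocation is requested without changing the
 * pool's default algorithm; the size and alignment are arbitrary.
 */
static unsigned long __maybe_unused example_aligned_alloc(struct gen_pool *pool)
{
	struct genpool_data_align data = { .align = 64 };

	return gen_pool_alloc_algo(pool, 256, gen_pool_first_fit_align, &data);
}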

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
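
/*
 * Editor's illustration (not part of the original file): obtaining a
 * zeroed buffer together with its DMA-view address, e.g. for a
 * hypothetical descriptor ring that the device scans. The size is
 * arbitrary.
 */
static void *__maybe_unused example_ring_alloc(struct gen_pool *pool,
					       dma_addr_t *ring_dma)
{
	/* zeroed so the device never sees stale descriptors */
	return gen_pool_dma_zalloc(pool, 512, ring_dma);
}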

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
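
/*
 * Editor's illustration (not part of the original file): a
 * gen_pool_for_each_chunk() callback. The callback runs under
 * rcu_read_lock(), so it must not sleep; this hypothetical one just
 * counts the chunks.
 *
 * Usage:
 *	unsigned int count = 0;
 *	gen_pool_for_each_chunk(pool, example_count_chunk, &count);
 */
static void __maybe_unused example_count_chunk(struct gen_pool *pool,
					       struct gen_pool_chunk *chunk,
					       void *data)
{
	(*(unsigned int *)data)++;
}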

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Set the algorithm used for future allocations from the pool.
 * If @algo is NULL, gen_pool_first_fit is used as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment (a struct genpool_data_align)
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	/* convert the byte alignment into a bitmap-granule mask/offset */
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
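
/*
 * Editor's illustration (not part of the original file): switching a
 * pool over to the best-fit strategy defined further below. Passing
 * NULL for @algo instead would restore the first-fit default.
 */
static void __maybe_unused example_use_best_fit(struct gen_pool *pool)
{
	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
}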

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed region (a struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;	/* requested region not free: fail */
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
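
/*
 * Editor's illustration (not part of the original file): reserving a
 * region at a fixed offset from the start of the pool, e.g. a
 * hypothetical hardware mailbox that must live at offset 0x100. The
 * offset must be a multiple of the pool's minimum allocation size.
 */
static unsigned long __maybe_unused example_fixed_alloc(struct gen_pool *pool)
{
	struct genpool_data_fixed data = { .offset = 0x100 };

	return gen_pool_alloc_algo(pool, 64, gen_pool_fixed_alloc, &data);
}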

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region from
 * which the memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);

		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;	/* exact fit */
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 *
 * Returns the pool on success, or an ERR_PTR() on failure.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
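
/*
 * Editor's illustration (not part of the original file): a hypothetical
 * probe() path creating a managed pool over a device-local memory region;
 * devres destroys the pool automatically when the driver detaches. The
 * "sram" name and order 6 (64-byte granules) are arbitrary.
 */
static int __maybe_unused example_probe(struct device *dev,
					unsigned long base, size_t size)
{
	struct gen_pool *pool;

	pool = devm_gen_pool_create(dev, 6, NUMA_NO_NODE, "sram");
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	return gen_pool_add(pool, base, size, NUMA_NO_NODE);
}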