// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <[email protected]>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
        struct list_head        entry;
        dr_release_t            release;
#ifdef CONFIG_DEBUG_DEVRES
        const char              *name;
        size_t                  size;
#endif
};

struct devres {
        struct devres_node      node;
        /*
         * Some archs want to perform DMA into kmalloc caches
         * and need a guaranteed alignment larger than
         * the alignment of a 64-bit integer.
         * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
         * buffer alignment as if it was allocated by plain kmalloc().
         */
        u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
        struct devres_node      node[2];
        void                    *id;
        int                     color;
        /* -- 8 pointers */
};
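
/*
 * Layout sketch (illustrative, derived from the definitions above):
 * every managed allocation is a single kmalloc'd block consisting of a
 * devres header followed by the driver-visible payload. Drivers only
 * ever see the payload pointer; container_of(ptr, struct devres, data)
 * recovers the header.
 *
 *      +---------------------+ <- struct devres *
 *      | node.entry          |    (linked into dev->devres_head)
 *      | node.release        |
 *      +---------------------+ <- dr->data, returned to the driver,
 *      | payload ...         |    ARCH_KMALLOC_MINALIGN aligned
 *      +---------------------+
 */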

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
                             size_t size)
{
        node->name = name;
        node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
                       const char *op)
{
        if (unlikely(log_devres))
                dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
                        op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)    do {} while (0)
#define devres_log(dev, node, op)       do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
        /* noop */
}

static void group_close_release(struct device *dev, void *res)
{
        /* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
        if (node->release == &group_open_release)
                return container_of(node, struct devres_group, node[0]);
        if (node->release == &group_close_release)
                return container_of(node, struct devres_group, node[1]);
        return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
        /* We must catch any near-SIZE_MAX cases that could overflow. */
        if (unlikely(check_add_overflow(sizeof(struct devres),
                                        size, tot_size)))
                return false;

        return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
                                                size_t size, gfp_t gfp, int nid)
{
        size_t tot_size;
        struct devres *dr;

        if (!check_dr_size(size, &tot_size))
                return NULL;

        dr = kmalloc_node_track_caller(tot_size, gfp, nid);
        if (unlikely(!dr))
                return NULL;

        /* Zero only the devres header; the data area is cleared by
         * callers that pass __GFP_ZERO.
         */
        memset(dr, 0, offsetof(struct devres, data));

        INIT_LIST_HEAD(&dr->node.entry);
        dr->node.release = release;
        return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
        devres_log(dev, node, "ADD");
        BUG_ON(!list_empty(&node->entry));
        list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
                       struct devres_node *old, struct devres_node *new)
{
        devres_log(dev, old, "REPLACE");
        BUG_ON(!list_empty(&new->entry));
        list_replace(&old->entry, &new->entry);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
                           const char *name)
{
        struct devres *dr;

        dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
        if (unlikely(!dr))
                return NULL;
        set_node_dbginfo(&dr->node, name, size);
        return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
        struct devres *dr;

        dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
        if (unlikely(!dr))
                return NULL;
        return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
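
/*
 * Usage sketch (illustrative only; my_clk_release() and my_enable_clk()
 * are hypothetical names, not part of this file): the usual consumer
 * pattern is devres_alloc() + devres_add(), with devres_free() on the
 * error path while the resource is not yet registered.
 *
 *      static void my_clk_release(struct device *dev, void *res)
 *      {
 *              clk_disable_unprepare(*(struct clk **)res);
 *      }
 *
 *      int my_enable_clk(struct device *dev, struct clk *clk)
 *      {
 *              struct clk **ptr;
 *              int ret;
 *
 *              ptr = devres_alloc(my_clk_release, sizeof(*ptr), GFP_KERNEL);
 *              if (!ptr)
 *                      return -ENOMEM;
 *
 *              ret = clk_prepare_enable(clk);
 *              if (ret) {
 *                      devres_free(ptr);       // not registered yet
 *                      return ret;
 *              }
 *
 *              *ptr = clk;
 *              devres_add(dev, ptr);           // released on detach
 *              return 0;
 *      }
 */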

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *      void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
                         dr_match_t match, void *match_data,
                         void (*fn)(struct device *, void *, void *),
                         void *data)
{
        struct devres_node *node;
        struct devres_node *tmp;
        unsigned long flags;

        if (!fn)
                return;

        spin_lock_irqsave(&dev->devres_lock, flags);
        list_for_each_entry_safe_reverse(node, tmp,
                                         &dev->devres_head, entry) {
                struct devres *dr = container_of(node, struct devres, node);

                if (node->release != release)
                        continue;
                if (match && !match(dev, dr->data, match_data))
                        continue;
                fn(dev, dr->data, data);
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
        if (res) {
                struct devres *dr = container_of(res, struct devres, data);

                BUG_ON(!list_empty(&dr->node.entry));
                kfree(dr);
        }
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
        struct devres *dr = container_of(res, struct devres, data);
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        add_dr(dev, &dr->node);
        spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

static struct devres *find_dr(struct device *dev, dr_release_t release,
                              dr_match_t match, void *match_data)
{
        struct devres_node *node;

        list_for_each_entry_reverse(node, &dev->devres_head, entry) {
                struct devres *dr = container_of(node, struct devres, node);

                if (node->release != release)
                        continue;
                if (match && !match(dev, dr->data, match_data))
                        continue;
                return dr;
        }

        return NULL;
}
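
/*
 * Sketch of a dr_match_t callback (illustrative; struct my_res,
 * my_res_match() and my_res_release() are hypothetical): find_dr() and
 * the devres_find()/devres_get()/devres_remove() wrappers below walk
 * the list newest-first and hand each candidate's data area to @match.
 *
 *      struct my_res {
 *              int id;
 *      };
 *
 *      static int my_res_match(struct device *dev, void *res, void *data)
 *      {
 *              struct my_res *r = res;
 *
 *              return r->id == *(int *)data;
 *      }
 *
 *      // lookup: devres_find(dev, my_res_release, my_res_match, &id);
 */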

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        struct devres *dr;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, release, match, match_data);
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        if (dr)
                return dr->data;
        return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
                  dr_match_t match, void *match_data)
{
        struct devres *new_dr = container_of(new_res, struct devres, data);
        struct devres *dr;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, new_dr->node.release, match, match_data);
        if (!dr) {
                add_dr(dev, &new_dr->node);
                dr = new_dr;
                new_res = NULL;
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);
        devres_free(new_res);

        return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
                     dr_match_t match, void *match_data)
{
        struct devres *dr;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, release, match, match_data);
        if (dr) {
                list_del_init(&dr->node.entry);
                devres_log(dev, &dr->node, "REM");
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        if (dr)
                return dr->data;
        return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);
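
/*
 * Sketch of the find-or-create pattern with devres_get() (illustrative;
 * my_singleton_release() and struct my_singleton are hypothetical):
 * allocate a candidate first, then let devres_get() either register it
 * or free it and return the already-registered instance.
 *
 *      struct my_singleton *obj;
 *      void *new;
 *
 *      new = devres_alloc(my_singleton_release, sizeof(*obj), GFP_KERNEL);
 *      if (!new)
 *              return -ENOMEM;
 *
 *      obj = devres_get(dev, new, NULL, NULL); // frees @new if one exists
 */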

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed. The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        void *res;

        res = devres_remove(dev, release, match, match_data);
        if (unlikely(!res))
                return -ENOENT;

        devres_free(res);
        return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        void *res;

        res = devres_remove(dev, release, match, match_data);
        if (unlikely(!res))
                return -ENOENT;

        (*release)(dev, res);
        devres_free(res);
        return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
                        struct list_head *first, struct list_head *end,
                        struct list_head *todo)
{
        struct devres_node *node, *n;
        int cnt = 0, nr_groups = 0;

        /* First pass - move normal devres entries to @todo and clear
         * devres_group colors.
         */
        node = list_entry(first, struct devres_node, entry);
        list_for_each_entry_safe_from(node, n, end, entry) {
                struct devres_group *grp;

                grp = node_to_group(node);
                if (grp) {
                        /* clear color of group markers in the first pass */
                        grp->color = 0;
                        nr_groups++;
                } else {
                        /* regular devres entry */
                        if (&node->entry == first)
                                first = first->next;
                        list_move_tail(&node->entry, todo);
                        cnt++;
                }
        }

        if (!nr_groups)
                return cnt;

        /* Second pass - Scan groups and color them. A group gets
         * color value of two iff the group is wholly contained in
         * [current node, end). That is, for a closed group, both opening
         * and closing markers should be in the range, while just the
         * opening marker is enough for an open group.
         */
        node = list_entry(first, struct devres_node, entry);
        list_for_each_entry_safe_from(node, n, end, entry) {
                struct devres_group *grp;

                grp = node_to_group(node);
                BUG_ON(!grp || list_empty(&grp->node[0].entry));

                grp->color++;
                if (list_empty(&grp->node[1].entry))
                        grp->color++;

                BUG_ON(grp->color <= 0 || grp->color > 2);
                if (grp->color == 2) {
                        /* No need to update current node or end. The removed
                         * nodes are always before both.
                         */
                        list_move_tail(&grp->node[0].entry, todo);
                        list_del_init(&grp->node[1].entry);
                }
        }

        return cnt;
}
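
/*
 * Worked example for remove_nodes() (illustrative, not compiled):
 * suppose [first, end) covers
 *
 *      grpA<  res1  grpB<  res2  grpB>  grpA>
 *
 * Pass 1 moves res1 and res2 to @todo (cnt == 2) and clears the colors
 * of grpA and grpB. Pass 2 visits the four remaining group markers:
 * each closed group has both markers in range, so each reaches color 2
 * and its markers are unlinked. An open group (closing marker never
 * added) reaches color 2 from its single marker because node[1] is
 * still unlinked. A group only partially inside the range stays at
 * color 1 and is left on the device's list.
 */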

static void release_nodes(struct device *dev, struct list_head *todo)
{
        struct devres *dr, *tmp;

        /* Release. Note that both devres and devres_group are
         * handled as devres in the following loop. This is safe.
         */
        list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
                devres_log(dev, &dr->node, "REL");
                dr->node.release(dev, dr->data);
                kfree(dr);
        }
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, -ENODEV if the devres
 * list was never initialized.
 */
int devres_release_all(struct device *dev)
{
        unsigned long flags;
        LIST_HEAD(todo);
        int cnt;

        /* Looks like an uninitialized device structure */
        if (WARN_ON(dev->devres_head.next == NULL))
                return -ENODEV;

        spin_lock_irqsave(&dev->devres_lock, flags);
        cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        release_nodes(dev, &todo);
        return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
        struct devres_group *grp;
        unsigned long flags;

        grp = kmalloc(sizeof(*grp), gfp);
        if (unlikely(!grp))
                return NULL;

        grp->node[0].release = &group_open_release;
        grp->node[1].release = &group_close_release;
        INIT_LIST_HEAD(&grp->node[0].entry);
        INIT_LIST_HEAD(&grp->node[1].entry);
        set_node_dbginfo(&grp->node[0], "grp<", 0);
        set_node_dbginfo(&grp->node[1], "grp>", 0);
        grp->id = grp;
        if (id)
                grp->id = id;

        spin_lock_irqsave(&dev->devres_lock, flags);
        add_dr(dev, &grp->node[0]);
        spin_unlock_irqrestore(&dev->devres_lock, flags);
        return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
        struct devres_node *node;

        list_for_each_entry_reverse(node, &dev->devres_head, entry) {
                struct devres_group *grp;

                if (node->release != &group_open_release)
                        continue;

                grp = container_of(node, struct devres_group, node[0]);

                if (id) {
                        if (grp->id == id)
                                return grp;
                } else if (list_empty(&grp->node[1].entry))
                        return grp;
        }

        return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp)
                add_dr(dev, &grp->node[1]);
        else
                WARN_ON(1);

        spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
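
/*
 * Usage sketch for devres groups (illustrative; my_init_hw() is a
 * hypothetical helper): a group brackets a sequence of managed
 * allocations so they can be rolled back together without detaching
 * the device.
 *
 *      if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      ret = my_init_hw(dev);          // makes devm_*() allocations
 *      if (ret) {
 *              devres_release_group(dev, NULL); // undo just this group
 *              return ret;
 *      }
 *      devres_close_group(dev, NULL);
 */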

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp) {
                list_del_init(&grp->node[0].entry);
                list_del_init(&grp->node[1].entry);
                devres_log(dev, &grp->node[0], "REM");
        } else
                WARN_ON(1);

        spin_unlock_irqrestore(&dev->devres_lock, flags);

        kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;
        LIST_HEAD(todo);
        int cnt = 0;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp) {
                struct list_head *first = &grp->node[0].entry;
                struct list_head *end = &dev->devres_head;

                if (!list_empty(&grp->node[1].entry))
                        end = grp->node[1].entry.next;

                cnt = remove_nodes(dev, first, end, &todo);
                spin_unlock_irqrestore(&dev->devres_lock, flags);

                release_nodes(dev, &todo);
        } else {
                WARN_ON(1);
                spin_unlock_irqrestore(&dev->devres_lock, flags);
        }

        return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
        void *data;
        void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
        struct action_devres *devres = res;
        struct action_devres *target = p;

        return devres->action == target->action &&
               devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
        struct action_devres *devres = res;

        devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
        struct action_devres *devres;

        devres = devres_alloc(devm_action_release,
                              sizeof(struct action_devres), GFP_KERNEL);
        if (!devres)
                return -ENOMEM;

        devres->data = data;
        devres->action = action;

        devres_add(dev, devres);
        return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
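
/*
 * Usage sketch (illustrative; my_regulator_off() and my_regulator are
 * hypothetical): a custom action queues an arbitrary undo callback on
 * the devres list. The devm_add_action_or_reset() helper in
 * <linux/device.h> wraps this with an immediate rollback if
 * registration fails.
 *
 *      static void my_regulator_off(void *data)
 *      {
 *              regulator_disable(data);
 *      }
 *
 *      ret = devm_add_action(dev, my_regulator_off, my_regulator);
 *      if (ret) {
 *              regulator_disable(my_regulator);
 *              return ret;
 *      }
 */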

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
        struct action_devres devres = {
                .data = data,
                .action = action,
        };

        WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
                               &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action(). Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
        struct action_devres devres = {
                .data = data,
                .action = action,
        };

        WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
                               &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
        /* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
        return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, the guaranteed alignment is ARCH_KMALLOC_MINALIGN,
 * i.e. the same as a plain kmalloc() of @size.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
        struct devres *dr;

        if (unlikely(!size))
                return ZERO_SIZE_PTR;

        /* use raw alloc_dr for kmalloc caller tracing */
        dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
        if (unlikely(!dr))
                return NULL;

        /*
         * This is named devm_kzalloc_release for historical reasons
         * The initial implementation did not support kmalloc, only kzalloc
         */
        set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
        devres_add(dev, dr->data);
        return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
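
/*
 * Usage sketch (illustrative; struct my_priv and my_probe() are
 * hypothetical): the common idiom is a devm_kzalloc() of the driver's
 * private struct in probe, with no matching kfree anywhere in the
 * driver.
 *
 *      static int my_probe(struct platform_device *pdev)
 *      {
 *              struct my_priv *priv;
 *
 *              priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *              if (!priv)
 *                      return -ENOMEM;
 *
 *              platform_set_drvdata(pdev, priv);
 *              return 0;
 *      }
 */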

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero). The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 *
 * RETURNS:
 * Pointer to the resized chunk on success, NULL on failure. On failure
 * the original chunk stays allocated and registered.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
        size_t total_new_size, total_old_size;
        struct devres *old_dr, *new_dr;
        unsigned long flags;

        if (unlikely(!new_size)) {
                devm_kfree(dev, ptr);
                return ZERO_SIZE_PTR;
        }

        if (unlikely(ZERO_OR_NULL_PTR(ptr)))
                return devm_kmalloc(dev, new_size, gfp);

        if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
                /*
                 * We cannot reliably realloc a const string returned by
                 * devm_kstrdup_const().
                 */
                return NULL;

        if (!check_dr_size(new_size, &total_new_size))
                return NULL;

        total_old_size = ksize(container_of(ptr, struct devres, data));
        if (total_old_size == 0) {
                WARN(1, "Pointer doesn't point to dynamically allocated memory.");
                return NULL;
        }

        /*
         * If new size is smaller or equal to the actual number of bytes
         * allocated previously - just return the same pointer.
         */
        if (total_new_size <= total_old_size)
                return ptr;

        /*
         * Otherwise: allocate new, larger chunk. We need to allocate before
         * taking the lock as most probably the caller uses GFP_KERNEL.
         */
        new_dr = alloc_dr(devm_kmalloc_release,
                          total_new_size, gfp, dev_to_node(dev));
        if (!new_dr)
                return NULL;

        /*
         * The spinlock protects the linked list against concurrent
         * modifications but not the resource itself.
         */
        spin_lock_irqsave(&dev->devres_lock, flags);

        old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
        if (!old_dr) {
                spin_unlock_irqrestore(&dev->devres_lock, flags);
                kfree(new_dr);
                WARN(1, "Memory chunk not managed or managed by a different device.");
                return NULL;
        }

        replace_dr(dev, &old_dr->node, &new_dr->node);

        spin_unlock_irqrestore(&dev->devres_lock, flags);

        /*
         * We can copy the memory contents after releasing the lock as we're
         * no longer modifying the list links.
         */
        memcpy(new_dr->data, old_dr->data,
               total_old_size - offsetof(struct devres, data));
        /*
         * Same for releasing the old devres - it's now been removed from the
         * list. This is also the reason why we must not use devm_kfree() - the
         * links are no longer valid.
         */
        kfree(old_dr);

        return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
        size_t size;
        char *buf;

        if (!s)
                return NULL;

        size = strlen(s) + 1;
        buf = devm_kmalloc(dev, size, gfp);
        if (buf)
                memcpy(buf, s, size);
        return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
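
/*
 * Usage sketch (illustrative; @buf and @new_count are hypothetical):
 * growing a managed buffer. As with krealloc(), keep the old pointer
 * until the call succeeds, since failure leaves the original
 * allocation intact and registered.
 *
 *      u32 *tmp;
 *
 *      tmp = devm_krealloc(dev, buf, new_count * sizeof(*buf), GFP_KERNEL);
 *      if (!tmp)
 *              return -ENOMEM; // @buf is still valid
 *      buf = tmp;
 */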

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is located in .rodata, otherwise a copy made
 * with devm_kstrdup().
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *                   into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
                      va_list ap)
{
        unsigned int len;
        char *p;
        va_list aq;

        va_copy(aq, ap);
        len = vsnprintf(NULL, 0, fmt, aq);
        va_end(aq);

        p = devm_kmalloc(dev, len+1, gfp);
        if (!p)
                return NULL;

        vsnprintf(p, len+1, fmt, ap);

        return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *                  into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
        va_list ap;
        char *p;

        va_start(ap, fmt);
        p = devm_kvasprintf(dev, gfp, fmt, ap);
        va_end(ap);

        return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
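
/*
 * Usage sketch (illustrative; @channel is a hypothetical variable):
 * building a managed name string that lives exactly as long as the
 * device binding.
 *
 *      const char *name;
 *
 *      name = devm_kasprintf(dev, GFP_KERNEL, "%s-ch%d",
 *                            dev_name(dev), channel);
 *      if (!name)
 *              return -ENOMEM;
 */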

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
        int rc;

        /*
         * Special cases: pointer to a string in .rodata returned by
         * devm_kstrdup_const() or NULL/ZERO ptr.
         */
        if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
                return;

        rc = devres_destroy(dev, devm_kmalloc_release,
                            devm_kmalloc_match, (void *)p);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a memory region using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = devm_kmalloc(dev, len, gfp);
        if (p)
                memcpy(p, src, len);

        return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
        unsigned long addr;
        unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
        struct pages_devres *devres = res;
        struct pages_devres *target = p;

        return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
        struct pages_devres *devres = res;

        free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages. Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
                                  gfp_t gfp_mask, unsigned int order)
{
        struct pages_devres *devres;
        unsigned long addr;

        addr = __get_free_pages(gfp_mask, order);

        if (unlikely(!addr))
                return 0;

        devres = devres_alloc(devm_pages_release,
                              sizeof(struct pages_devres), GFP_KERNEL);
        if (unlikely(!devres)) {
                free_pages(addr, order);
                return 0;
        }

        devres->addr = addr;
        devres->order = order;

        devres_add(dev, devres);
        return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
        struct pages_devres devres = { .addr = addr };

        WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
                               &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
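
/*
 * Usage sketch (illustrative): a managed page-order allocation. The
 * @order is remembered by the devres entry, which is why
 * devm_free_pages() above only needs the address.
 *
 *      unsigned long buf;
 *
 *      buf = devm_get_free_pages(dev, GFP_KERNEL, 2); // 4 pages
 *      if (!buf)
 *              return -ENOMEM;
 */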

static void devm_percpu_release(struct device *dev, void *pdata)
{
        void __percpu *p;

        p = *(void __percpu **)pdata;
        free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
        struct devres *devr = container_of(data, struct devres, data);

        return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
                                   size_t align)
{
        void *p;
        void __percpu *pcpu;

        pcpu = __alloc_percpu(size, align);
        if (!pcpu)
                return NULL;

        p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
        if (!p) {
                free_percpu(pcpu);
                return NULL;
        }

        *(void __percpu **)p = pcpu;

        devres_add(dev, p);

        return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
        WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
                               (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
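
/*
 * Usage sketch (illustrative; struct my_stats and its rx_packets field
 * are hypothetical): the devm_alloc_percpu() macro in <linux/device.h>
 * supplies the size and alignment of the given type to
 * __devm_alloc_percpu() above.
 *
 *      struct my_stats __percpu *stats;
 *
 *      stats = devm_alloc_percpu(dev, struct my_stats);
 *      if (!stats)
 *              return -ENOMEM;
 *
 *      this_cpu_inc(stats->rx_packets);
 */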