Lines matching refs: va
971 va_size(struct vmap_area *va) in va_size() argument
973 return (va->va_end - va->va_start); in va_size()
979 struct vmap_area *va; in get_subtree_max_size() local
981 va = rb_entry_safe(node, struct vmap_area, rb_node); in get_subtree_max_size()
982 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
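
The two helpers above are pure range arithmetic: va_size() is the length of the half-open [va_start, va_end) interval, and get_subtree_max_size() reads the augmented rb-tree field, treating a missing child as size 0. A minimal userspace sketch of the same arithmetic, on a simplified struct rather than the kernel's vmap_area:

    #include <stdio.h>

    /* Hedged sketch: only the fields the two helpers touch. */
    struct area {
        unsigned long va_start;
        unsigned long va_end;           /* exclusive end of the range */
        unsigned long subtree_max_size; /* largest area size in this subtree */
    };

    static unsigned long area_size(const struct area *a)
    {
        return a->va_end - a->va_start;
    }

    static unsigned long subtree_max(const struct area *a)
    {
        return a ? a->subtree_max_size : 0; /* a NULL child contributes nothing */
    }

    int main(void)
    {
        struct area a = { .va_start = 0x1000, .va_end = 0x5000 };
        printf("%lu %lu\n", area_size(&a), subtree_max(NULL)); /* prints "16384 0" */
        return 0;
    }
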
1007 struct vmap_area *va; in __find_vmap_area() local
1009 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
1010 if (addr < va->va_start) in __find_vmap_area()
1012 else if (addr >= va->va_end) in __find_vmap_area()
1015 return va; in __find_vmap_area()
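
The __find_vmap_area() hits show the lookup rule for the busy tree: descend left while the address is below the node's range, right while it is at or beyond va_end, and stop when the node's half-open interval contains it. A plain binary-search-tree sketch of that rule (hypothetical node type, not the kernel's rb-tree):

    #include <stddef.h>

    struct range_node {
        unsigned long start, end;       /* half-open [start, end) */
        struct range_node *left, *right;
    };

    static struct range_node *find_range(struct range_node *n, unsigned long addr)
    {
        while (n) {
            if (addr < n->start)
                n = n->left;
            else if (addr >= n->end)
                n = n->right;
            else
                return n;               /* start <= addr < end */
        }
        return NULL;                    /* no area covers addr */
    }
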
1025 struct vmap_area *va = NULL; in __find_vmap_area_exceed_addr() local
1035 va = tmp; in __find_vmap_area_exceed_addr()
1044 return va; in __find_vmap_area_exceed_addr()
1055 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) in find_vmap_area_exceed_addr_lock() argument
1066 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1068 if (*va) in find_vmap_area_exceed_addr_lock()
1069 if (!va_start_lowest || (*va)->va_start < va_start_lowest) in find_vmap_area_exceed_addr_lock()
1070 va_start_lowest = (*va)->va_start; in find_vmap_area_exceed_addr_lock()
1083 *va = __find_vmap_area(va_start_lowest, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1085 if (*va) in find_vmap_area_exceed_addr_lock()
1104 find_va_links(struct vmap_area *va, in find_va_links() argument
1134 if (va->va_end <= tmp_va->va_start) in find_va_links()
1136 else if (va->va_start >= tmp_va->va_end) in find_va_links()
1140 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
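
find_va_links() walks the same kind of tree, but to choose an insertion link rather than to find a hit: a new area belongs left of a node it ends at or before, right of a node it starts at or after, and any other relation means the ranges overlap (the kernel warns with both ranges, as line 1140 shows). A hedged sketch of just that three-way decision, without the rb-tree plumbing:

    /* Returns -1 to descend left, 1 to descend right, 0 on overlap (an error). */
    static int link_side(unsigned long new_start, unsigned long new_end,
                         unsigned long node_start, unsigned long node_end)
    {
        if (new_end <= node_start)
            return -1;      /* new range lies entirely below the node */
        if (new_start >= node_end)
            return 1;       /* new range lies entirely above the node */
        return 0;           /* ranges overlap: the tree is inconsistent */
    }
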
1169 __link_va(struct vmap_area *va, struct rb_root *root, in __link_va() argument
1184 rb_link_node(&va->rb_node, parent, link); in __link_va()
1197 rb_insert_augmented(&va->rb_node, in __link_va()
1199 va->subtree_max_size = 0; in __link_va()
1201 rb_insert_color(&va->rb_node, root); in __link_va()
1205 list_add(&va->list, head); in __link_va()
1209 link_va(struct vmap_area *va, struct rb_root *root, in link_va() argument
1213 __link_va(va, root, parent, link, head, false); in link_va()
1217 link_va_augment(struct vmap_area *va, struct rb_root *root, in link_va_augment() argument
1221 __link_va(va, root, parent, link, head, true); in link_va_augment()
1225 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) in __unlink_va() argument
1227 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in __unlink_va()
1231 rb_erase_augmented(&va->rb_node, in __unlink_va()
1234 rb_erase(&va->rb_node, root); in __unlink_va()
1236 list_del_init(&va->list); in __unlink_va()
1237 RB_CLEAR_NODE(&va->rb_node); in __unlink_va()
1241 unlink_va(struct vmap_area *va, struct rb_root *root) in unlink_va() argument
1243 __unlink_va(va, root, false); in unlink_va()
1247 unlink_va_augment(struct vmap_area *va, struct rb_root *root) in unlink_va_augment() argument
1249 __unlink_va(va, root, true); in unlink_va_augment()
1257 compute_subtree_max_size(struct vmap_area *va) in compute_subtree_max_size() argument
1259 return max3(va_size(va), in compute_subtree_max_size()
1260 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
1261 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
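
compute_subtree_max_size() is what keeps the free tree's augmented metadata consistent: a node's subtree_max_size is the maximum of its own size and the cached values of its two children, and the propagate callback pushes changes upward. A sketch of that max3 computation on a simplified node:

    struct free_node {
        unsigned long start, end;       /* half-open free range */
        unsigned long subtree_max;      /* cached: largest size in this subtree */
        struct free_node *left, *right;
    };

    static unsigned long cached_max(const struct free_node *n)
    {
        return n ? n->subtree_max : 0;
    }

    /* Equivalent of the kernel's max3(own size, left cache, right cache). */
    static unsigned long recompute_subtree_max(const struct free_node *n)
    {
        unsigned long m = n->end - n->start;

        if (cached_max(n->left) > m)
            m = cached_max(n->left);
        if (cached_max(n->right) > m)
            m = cached_max(n->right);
        return m;
    }
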
1267 struct vmap_area *va; in augment_tree_propagate_check() local
1270 list_for_each_entry(va, &free_vmap_area_list, list) { in augment_tree_propagate_check()
1271 computed_size = compute_subtree_max_size(va); in augment_tree_propagate_check()
1272 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
1274 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
1307 augment_tree_propagate_from(struct vmap_area *va) in augment_tree_propagate_from() argument
1314 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
1322 insert_vmap_area(struct vmap_area *va, in insert_vmap_area() argument
1328 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area()
1330 link_va(va, root, parent, link, head); in insert_vmap_area()
1334 insert_vmap_area_augment(struct vmap_area *va, in insert_vmap_area_augment() argument
1342 link = find_va_links(va, NULL, from, &parent); in insert_vmap_area_augment()
1344 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area_augment()
1347 link_va_augment(va, root, parent, link, head); in insert_vmap_area_augment()
1348 augment_tree_propagate_from(va); in insert_vmap_area_augment()
1364 __merge_or_add_vmap_area(struct vmap_area *va, in __merge_or_add_vmap_area() argument
1377 link = find_va_links(va, root, NULL, &parent); in __merge_or_add_vmap_area()
1397 if (sibling->va_start == va->va_end) { in __merge_or_add_vmap_area()
1398 sibling->va_start = va->va_start; in __merge_or_add_vmap_area()
1401 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1404 va = sibling; in __merge_or_add_vmap_area()
1418 if (sibling->va_end == va->va_start) { in __merge_or_add_vmap_area()
1427 __unlink_va(va, root, augment); in __merge_or_add_vmap_area()
1429 sibling->va_end = va->va_end; in __merge_or_add_vmap_area()
1432 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1435 va = sibling; in __merge_or_add_vmap_area()
1442 __link_va(va, root, parent, link, head, augment); in __merge_or_add_vmap_area()
1444 return va; in __merge_or_add_vmap_area()
1448 merge_or_add_vmap_area(struct vmap_area *va, in merge_or_add_vmap_area() argument
1451 return __merge_or_add_vmap_area(va, root, head, false); in merge_or_add_vmap_area()
1455 merge_or_add_vmap_area_augment(struct vmap_area *va, in merge_or_add_vmap_area_augment() argument
1458 va = __merge_or_add_vmap_area(va, root, head, true); in merge_or_add_vmap_area_augment()
1459 if (va) in merge_or_add_vmap_area_augment()
1460 augment_tree_propagate_from(va); in merge_or_add_vmap_area_augment()
1462 return va; in merge_or_add_vmap_area_augment()
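
The __merge_or_add_vmap_area() lines show the two coalescing cases taken when a range goes back onto a free list: if the next sibling starts exactly at va_end the freed range is folded into it (line 1397), and if the previous sibling ends exactly at va_start the sibling grows upward and the freed area is unlinked and returned to the slab cache (lines 1418-1432). A standalone sketch of the same interval coalescing on a sorted array, with no tree or slab involved; add_free_range() and its capacity assumption are illustrative only:

    struct rng { unsigned long start, end; };   /* half-open, sorted, disjoint */

    /*
     * Fold the freed range [s, e) into the array, merging with a touching
     * neighbour on either side.  Returns the new entry count; the caller
     * guarantees room for one extra element.  Toy code, not the kernel path.
     */
    static int add_free_range(struct rng *v, int n, unsigned long s, unsigned long e)
    {
        int i = 0, j;

        while (i < n && v[i].end < s)
            i++;

        if (i < n && v[i].end == s) {           /* lower neighbour touches */
            v[i].end = e;
            if (i + 1 < n && v[i + 1].start == e) {
                v[i].end = v[i + 1].end;        /* bridged both sides */
                for (j = i + 1; j < n - 1; j++)
                    v[j] = v[j + 1];
                n--;
            }
            return n;
        }
        if (i < n && v[i].start == e) {         /* upper neighbour touches */
            v[i].start = s;
            return n;
        }
        for (j = n; j > i; j--)                 /* no merge: insert at i */
            v[j] = v[j - 1];
        v[i].start = s;
        v[i].end = e;
        return n + 1;
    }
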
1466 is_within_this_va(struct vmap_area *va, unsigned long size, in is_within_this_va() argument
1471 if (va->va_start > vstart) in is_within_this_va()
1472 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
1481 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
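
is_within_this_va() is the fit test used by the lowest-match search: take the larger of the area's start and vstart, round it up to the requested alignment, and check that size bytes still end at or before va_end. A sketch of that check, assuming align is a power of two as the kernel's ALIGN() requires:

    /* Can an aligned block of 'size' bytes, at or after vstart, fit in [start, end)? */
    static int fits_in_range(unsigned long start, unsigned long end,
                             unsigned long size, unsigned long align,
                             unsigned long vstart)
    {
        unsigned long base = start > vstart ? start : vstart;
        unsigned long nva_start = (base + align - 1) & ~(align - 1); /* ALIGN() */

        return nva_start + size <= end;
    }
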
1495 struct vmap_area *va; in find_vmap_lowest_match() local
1506 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1509 vstart < va->va_start) { in find_vmap_lowest_match()
1512 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1513 return va; in find_vmap_lowest_match()
1532 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1533 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1534 return va; in find_vmap_lowest_match()
1537 vstart <= va->va_start) { in find_vmap_lowest_match()
1544 vstart = va->va_start + 1; in find_vmap_lowest_match()
1562 struct vmap_area *va; in find_vmap_lowest_linear_match() local
1564 list_for_each_entry(va, head, list) { in find_vmap_lowest_linear_match()
1565 if (!is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_linear_match()
1568 return va; in find_vmap_lowest_linear_match()
1603 classify_va_fit_type(struct vmap_area *va, in classify_va_fit_type() argument
1609 if (nva_start_addr < va->va_start || in classify_va_fit_type()
1610 nva_start_addr + size > va->va_end) in classify_va_fit_type()
1614 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
1615 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
1619 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
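
classify_va_fit_type() reduces a request to one of four relations with the free area it is carved from: outside the area entirely, covering the whole area, flush with the low end, flush with the high end, or somewhere in the middle (which will force a split). A sketch with hypothetical enum names standing in for the kernel's fit types:

    enum fit {
        NO_FIT,         /* request does not lie inside the free area */
        FULL_FIT,       /* request consumes the whole area */
        LOW_FIT,        /* request starts at va_start, a tail remains */
        HIGH_FIT,       /* request ends at va_end, a head remains */
        MID_FIT,        /* request sits in the middle, the area must split */
    };

    static enum fit classify_fit(unsigned long va_start, unsigned long va_end,
                                 unsigned long req_start, unsigned long req_size)
    {
        if (req_start < va_start || req_start + req_size > va_end)
            return NO_FIT;

        if (va_start == req_start)
            return va_end == req_start + req_size ? FULL_FIT : LOW_FIT;
        if (va_end == req_start + req_size)
            return HIGH_FIT;
        return MID_FIT;
    }
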
1630 struct vmap_area *va, unsigned long nva_start_addr, in va_clip() argument
1634 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); in va_clip()
1644 unlink_va_augment(va, root); in va_clip()
1645 kmem_cache_free(vmap_area_cachep, va); in va_clip()
1654 va->va_start += size; in va_clip()
1663 va->va_end = nva_start_addr; in va_clip()
1707 lva->va_start = va->va_start; in va_clip()
1713 va->va_start = nva_start_addr + size; in va_clip()
1719 augment_tree_propagate_from(va); in va_clip()
1722 insert_vmap_area_augment(lva, &va->rb_node, root, head); in va_clip()
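
va_clip() then acts on that classification: a full fit unlinks and frees the area (lines 1644-1645), a low-end fit advances va_start (line 1654), a high-end fit pulls va_end back (line 1663), and a middle fit allocates a second area, lva, to keep the left-over head while the original keeps the tail (lines 1707-1722). The split arithmetic alone, on plain start/end pairs with no tree updates or allocator, might look like this:

    struct span { unsigned long start, end; };

    /*
     * Clip [req, req + size) out of the free span *s.  Returns how many free
     * spans remain (0, 1 or 2); a middle fit writes the left-over head to
     * *head.  Assumes the request was already classified as fitting.
     */
    static int clip_span(struct span *s, struct span *head,
                         unsigned long req, unsigned long size)
    {
        if (s->start == req && s->end == req + size)
            return 0;                   /* whole span consumed */

        if (s->start == req) {
            s->start += size;           /* low-end fit: keep the tail */
            return 1;
        }
        if (s->end == req + size) {
            s->end = req;               /* high-end fit: keep the head */
            return 1;
        }

        head->start = s->start;         /* middle fit: split in two */
        head->end = req;
        s->start = req + size;
        return 2;
    }
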
1729 va_alloc(struct vmap_area *va, in va_alloc() argument
1737 if (va->va_start > vstart) in va_alloc()
1738 nva_start_addr = ALIGN(va->va_start, align); in va_alloc()
1747 ret = va_clip(root, head, va, nva_start_addr, size); in va_alloc()
1765 struct vmap_area *va; in __alloc_vmap_area() local
1779 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); in __alloc_vmap_area()
1780 if (unlikely(!va)) in __alloc_vmap_area()
1783 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); in __alloc_vmap_area()
1797 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
1799 struct vmap_node *vn = addr_to_node(va->va_start); in free_vmap_area()
1805 unlink_va(va, &vn->busy.root); in free_vmap_area()
1812 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); in free_vmap_area()
1819 struct vmap_area *va = NULL, *tmp; in preload_this_cpu_lock() local
1831 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in preload_this_cpu_lock()
1836 if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) in preload_this_cpu_lock()
1837 kmem_cache_free(vmap_area_cachep, va); in preload_this_cpu_lock()
1852 node_pool_add_va(struct vmap_node *n, struct vmap_area *va) in node_pool_add_va() argument
1856 vp = size_to_va_pool(n, va_size(va)); in node_pool_add_va()
1861 list_add(&va->list, &vp->head); in node_pool_add_va()
1873 struct vmap_area *va = NULL; in node_pool_del_va() local
1883 va = list_first_entry(&vp->head, struct vmap_area, list); in node_pool_del_va()
1885 if (IS_ALIGNED(va->va_start, align)) { in node_pool_del_va()
1890 err |= (va_size(va) != size); in node_pool_del_va()
1891 err |= (va->va_start < vstart); in node_pool_del_va()
1892 err |= (va->va_end > vend); in node_pool_del_va()
1895 list_del_init(&va->list); in node_pool_del_va()
1898 va = NULL; in node_pool_del_va()
1901 list_move_tail(&va->list, &vp->head); in node_pool_del_va()
1902 va = NULL; in node_pool_del_va()
1907 return va; in node_pool_del_va()
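
node_pool_del_va() is the fast path that reuses a cached area from a per-node, per-size pool: the head of the matching size-class list is taken only if it is already aligned, exactly the right size and inside [vstart, vend); an unsuitable head is rotated to the tail and the caller falls back to the regular allocator. A loose sketch of that idea with a hypothetical singly linked pool (the kernel's pool layout and checks are richer):

    #include <stddef.h>

    #define NR_SIZE_CLASSES 8           /* hypothetical pool granularity */

    struct pool_entry {
        unsigned long start, end;
        struct pool_entry *next;
    };

    static struct pool_entry *pools[NR_SIZE_CLASSES];

    /* Reuse a cached range from size class 'idx' if its head fits the request. */
    static struct pool_entry *pool_get(unsigned int idx, unsigned long align,
                                       unsigned long vstart, unsigned long vend)
    {
        struct pool_entry *e;

        if (idx >= NR_SIZE_CLASSES || !(e = pools[idx]))
            return NULL;

        if (e->start & (align - 1))     /* align assumed to be a power of two */
            return NULL;
        if (e->start < vstart || e->end > vend)
            return NULL;

        pools[idx] = e->next;           /* pop the usable head */
        return e;
    }
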
1915 struct vmap_area *va; in node_alloc() local
1929 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); in node_alloc()
1932 if (va) in node_alloc()
1933 *addr = va->va_start; in node_alloc()
1935 return va; in node_alloc()
1939 struct vmap_area *va, unsigned long flags, const void *caller) in setup_vmalloc_vm() argument
1942 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1943 vm->size = vm->requested_size = va_size(va); in setup_vmalloc_vm()
1945 va->vm = vm; in setup_vmalloc_vm()
1959 struct vmap_area *va; in alloc_vmap_area() local
1982 va = node_alloc(size, align, vstart, vend, &addr, &vn_id); in alloc_vmap_area()
1983 if (!va) { in alloc_vmap_area()
1986 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in alloc_vmap_area()
1987 if (unlikely(!va)) in alloc_vmap_area()
1994 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
2014 va->va_start = addr; in alloc_vmap_area()
2015 va->va_end = addr + size; in alloc_vmap_area()
2016 va->vm = NULL; in alloc_vmap_area()
2017 va->flags = (va_flags | vn_id); in alloc_vmap_area()
2020 vm->addr = (void *)va->va_start; in alloc_vmap_area()
2021 vm->size = va_size(va); in alloc_vmap_area()
2022 va->vm = vm; in alloc_vmap_area()
2025 vn = addr_to_node(va->va_start); in alloc_vmap_area()
2028 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in alloc_vmap_area()
2031 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
2032 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
2033 BUG_ON(va->va_end > vend); in alloc_vmap_area()
2037 free_vmap_area(va); in alloc_vmap_area()
2041 return va; in alloc_vmap_area()
2062 kmem_cache_free(vmap_area_cachep, va); in alloc_vmap_area()
2119 struct vmap_area *va, *n; in reclaim_list_global() local
2125 list_for_each_entry_safe(va, n, head, list) in reclaim_list_global()
2126 merge_or_add_vmap_area_augment(va, in reclaim_list_global()
2136 struct vmap_area *va, *nva; in decay_va_pool_node() local
2157 list_for_each_entry_safe(va, nva, &tmp_list, list) { in decay_va_pool_node()
2158 list_del_init(&va->list); in decay_va_pool_node()
2159 merge_or_add_vmap_area(va, &decay_root, &decay_list); in decay_va_pool_node()
2188 struct vmap_area *va; in kasan_release_vmalloc_node() local
2194 list_for_each_entry(va, &vn->purge_list, list) { in kasan_release_vmalloc_node()
2195 if (is_vmalloc_or_module_addr((void *) va->va_start)) in kasan_release_vmalloc_node()
2196 kasan_release_vmalloc(va->va_start, va->va_end, in kasan_release_vmalloc_node()
2197 va->va_start, va->va_end, in kasan_release_vmalloc_node()
2209 struct vmap_area *va, *n_va; in purge_vmap_node() local
2217 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { in purge_vmap_node()
2218 unsigned long nr = va_size(va) >> PAGE_SHIFT; in purge_vmap_node()
2219 unsigned int vn_id = decode_vn_id(va->flags); in purge_vmap_node()
2221 list_del_init(&va->list); in purge_vmap_node()
2227 if (node_pool_add_va(vn, va)) in purge_vmap_node()
2231 list_add(&va->list, &local_list); in purge_vmap_node()
2347 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
2350 unsigned long va_start = va->va_start; in free_vmap_area_noflush()
2351 unsigned int vn_id = decode_vn_id(va->flags); in free_vmap_area_noflush()
2355 if (WARN_ON_ONCE(!list_empty(&va->list))) in free_vmap_area_noflush()
2358 nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT, in free_vmap_area_noflush()
2366 id_to_node(vn_id):addr_to_node(va->va_start); in free_vmap_area_noflush()
2369 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); in free_vmap_area_noflush()
2382 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
2384 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
2385 vunmap_range_noflush(va->va_start, va->va_end); in free_unmap_vmap_area()
2387 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
2389 free_vmap_area_noflush(va); in free_unmap_vmap_area()
2395 struct vmap_area *va; in find_vmap_area() local
2419 va = __find_vmap_area(addr, &vn->busy.root); in find_vmap_area()
2422 if (va) in find_vmap_area()
2423 return va; in find_vmap_area()
2432 struct vmap_area *va; in find_unlink_vmap_area() local
2443 va = __find_vmap_area(addr, &vn->busy.root); in find_unlink_vmap_area()
2444 if (va) in find_unlink_vmap_area()
2445 unlink_va(va, &vn->busy.root); in find_unlink_vmap_area()
2448 if (va) in find_unlink_vmap_area()
2449 return va; in find_unlink_vmap_area()
2509 struct vmap_area *va; member
2609 struct vmap_area *va; in new_vmap_block() local
2622 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
2626 if (IS_ERR(va)) { in new_vmap_block()
2628 return ERR_CAST(va); in new_vmap_block()
2631 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
2633 vb->va = va; in new_vmap_block()
2645 xa = addr_to_vb_xa(va->va_start); in new_vmap_block()
2646 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
2650 free_vmap_area(va); in new_vmap_block()
2674 xa = addr_to_vb_xa(vb->va->va_start); in free_vmap_block()
2675 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
2678 vn = addr_to_node(vb->va->va_start); in free_vmap_block()
2680 unlink_va(vb->va, &vn->busy.root); in free_vmap_block()
2683 free_vmap_area_noflush(vb->va); in free_vmap_block()
2788 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
2878 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
2935 struct vmap_area *va; in vm_unmap_ram() local
2951 va = find_unlink_vmap_area(addr); in vm_unmap_ram()
2952 if (WARN_ON_ONCE(!va)) in vm_unmap_ram()
2955 debug_check_no_locks_freed((void *)va->va_start, va_size(va)); in vm_unmap_ram()
2956 free_unmap_vmap_area(va); in vm_unmap_ram()
2986 struct vmap_area *va; in vm_map_ram() local
2987 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
2991 if (IS_ERR(va)) in vm_map_ram()
2994 addr = va->va_start; in vm_map_ram()
3114 struct vmap_area *va; in __get_vm_area_node() local
3138 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); in __get_vm_area_node()
3139 if (IS_ERR(va)) { in __get_vm_area_node()
3206 struct vmap_area *va; in find_vm_area() local
3208 va = find_vmap_area((unsigned long)addr); in find_vm_area()
3209 if (!va) in find_vm_area()
3212 return va->vm; in find_vm_area()
3227 struct vmap_area *va; in remove_vm_area() local
3236 va = find_unlink_vmap_area((unsigned long)addr); in remove_vm_area()
3237 if (!va || !va->vm) in remove_vm_area()
3239 vm = va->vm; in remove_vm_area()
3246 free_unmap_vmap_area(va); in remove_vm_area()
4292 start = vmap_block_vaddr(vb->va->va_start, rs); in vmap_ram_vread_iter()
4358 struct vmap_area *va; in vread_iter() local
4372 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); in vread_iter()
4377 if ((unsigned long)addr + remains <= va->va_start) in vread_iter()
4386 vm = va->vm; in vread_iter()
4387 flags = va->flags & VMAP_FLAGS_MASK; in vread_iter()
4403 vaddr = (char *) va->va_start; in vread_iter()
4404 size = vm ? get_vm_area_size(vm) : va_size(va); in vread_iter()
4438 next = va->va_end; in vread_iter()
4440 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); in vread_iter()
4569 struct vmap_area *va, *tmp; in pvm_find_va_enclose_addr() local
4573 va = NULL; in pvm_find_va_enclose_addr()
4578 va = tmp; in pvm_find_va_enclose_addr()
4588 return va; in pvm_find_va_enclose_addr()
4602 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) in pvm_determine_end_from_reverse() argument
4607 if (likely(*va)) { in pvm_determine_end_from_reverse()
4608 list_for_each_entry_from_reverse((*va), in pvm_determine_end_from_reverse()
4610 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
4611 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
4649 struct vmap_area **vas, *va; in pcpu_get_vm_areas() local
4702 va = pvm_find_va_enclose_addr(vmalloc_end); in pcpu_get_vm_areas()
4703 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4716 if (va == NULL) in pcpu_get_vm_areas()
4723 if (base + end > va->va_end) { in pcpu_get_vm_areas()
4724 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4732 if (base + start < va->va_start) { in pcpu_get_vm_areas()
4733 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
4734 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4749 va = pvm_find_va_enclose_addr(base + end); in pcpu_get_vm_areas()
4759 va = pvm_find_va_enclose_addr(start); in pcpu_get_vm_areas()
4760 if (WARN_ON_ONCE(va == NULL)) in pcpu_get_vm_areas()
4765 &free_vmap_area_list, va, start, size); in pcpu_get_vm_areas()
4771 va = vas[area]; in pcpu_get_vm_areas()
4772 va->va_start = start; in pcpu_get_vm_areas()
4773 va->va_end = start + size; in pcpu_get_vm_areas()
4818 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4820 if (va) in pcpu_get_vm_areas()
4822 va->va_start, va->va_end, in pcpu_get_vm_areas()
4869 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4871 if (va) in pcpu_get_vm_areas()
4873 va->va_start, va->va_end, in pcpu_get_vm_areas()
4906 struct vmap_area *va; in vmalloc_dump_obj() local
4917 va = __find_vmap_area(addr, &vn->busy.root); in vmalloc_dump_obj()
4918 if (!va || !va->vm) { in vmalloc_dump_obj()
4923 vm = va->vm; in vmalloc_dump_obj()
4964 struct vmap_area *va; in show_purge_info() local
4971 list_for_each_entry(va, &vn->lazy.head, list) { in show_purge_info()
4973 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
4974 va_size(va)); in show_purge_info()
4983 struct vmap_area *va; in vmalloc_info_show() local
4991 list_for_each_entry(va, &vn->busy.head, list) { in vmalloc_info_show()
4992 if (!va->vm) { in vmalloc_info_show()
4993 if (va->flags & VMAP_RAM) in vmalloc_info_show()
4995 (void *)va->va_start, (void *)va->va_end, in vmalloc_info_show()
4996 va_size(va)); in vmalloc_info_show()
5001 v = va->vm; in vmalloc_info_show()
5192 struct vmap_area *va; in vmalloc_init() local
5222 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); in vmalloc_init()
5223 if (WARN_ON_ONCE(!va)) in vmalloc_init()
5226 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
5227 va->va_end = va->va_start + tmp->size; in vmalloc_init()
5228 va->vm = tmp; in vmalloc_init()
5230 vn = addr_to_node(va->va_start); in vmalloc_init()
5231 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in vmalloc_init()
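
Finally, the vmalloc_init() hits show early boot converting the static vm_struct chain into tracked areas: each entry gets a zeroed vmap_area, va_start/va_end derived from the vm_struct's addr and size, a back-pointer in va->vm, and an insertion into the busy tree of the node owning that address. The conversion step by itself, on hypothetical simplified types:

    struct early_vm { void *addr; unsigned long size; };
    struct area_desc { unsigned long va_start, va_end; struct early_vm *vm; };

    /* Fill one tracking record from an early, statically registered mapping. */
    static void seed_area(struct area_desc *va, struct early_vm *tmp)
    {
        va->va_start = (unsigned long)tmp->addr;
        va->va_end = va->va_start + tmp->size;
        va->vm = tmp;
        /* the kernel then inserts va into the owning node's busy tree */
    }
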