// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */


bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

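/*
 * vfree() may be called from a context that must not sleep, e.g. from
 * an interrupt. Such requests are queued per CPU: the address is pushed
 * onto the llist below and the actual unmapping and freeing is deferred
 * to the associated work item.
 */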
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	struct page *page;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		if (unlikely(!pte_none(ptep_get(pte)))) {
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				dump_page(page, "remapping already mapped page");
			}
			BUG();
		}

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

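/*
 * Try to install one huge PMD mapping covering [addr, end). A non-zero
 * return means a huge mapping was installed; 0 means the range cannot
 * be mapped at this level (max_page_shift too small, not supported by
 * the architecture, size or alignment mismatch, or a page table that
 * cannot be freed sits in the way), so the caller falls back to the
 * next smaller mapping size. The PUD and P4D helpers below follow the
 * same pattern at their own levels.
 */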
static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

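/*
 * Map a contiguous physical range into the kernel page tables, with the
 * executable bit cleared via pgprot_nx() and mappings allowed up to
 * ioremap_max_page_shift. The cache is flushed unconditionally; KMSAN
 * metadata is only set up if the mapping itself succeeded.
 */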
int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

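/*
 * ioremap_page_range() requires that [addr, end) exactly covers a
 * vm_struct reserved with VM_IOREMAP, e.g. (illustrative only):
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	err = ioremap_page_range((unsigned long)area->addr,
 *			(unsigned long)area->addr + get_vm_area_size(area),
 *			phys_addr, prot);
 *
 * Partial ranges and areas lacking VM_IOREMAP are rejected below.
 */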
int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

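/*
 * KMSAN maintains shadow and origin mappings that mirror vmalloc ranges.
 * Those metadata mappings are torn down first, then the kernel mapping
 * itself is removed.
 */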
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
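
/*
 * For example (illustrative): on x86-64, mapping 512 physically
 * contiguous 4K pages with page_shift == PMD_SHIFT installs a single
 * 2M mapping instead of 512 individual PTEs, provided the virtual
 * address is suitably aligned.
 */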

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}
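
/*
 * Illustrative use of the sparse API (not taken from this file): a user
 * reserves a large region once with get_vm_area(size, VM_SPARSE), then
 * populates and depopulates page-sized chunks of it on demand:
 *
 *	vm_area_map_pages(area, start, start + PAGE_SIZE, &page);
 *	...
 *	vm_area_unmap_pages(area, start, start + PAGE_SIZE);
 */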

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Allocating them from
 * a dedicated cache instead of the generic slab makes things faster,
 * especially in the "no edge" splitting of a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free-block
 * size of its sub-tree, left or right. This makes it possible
 * to find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;
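
/*
 * For example (illustrative): if a node's left sub-tree has a maximum
 * free-block size of 8K, its right sub-tree 32K, and the node's own
 * area is 16K, then subtree_max_size of that node is 32K; a search for
 * a 16K block can skip the left sub-tree entirely.
 */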

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, so that
 * more permissive allocation masks can be used.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure couples a list and an rb-tree into one entity
 * protected by a single lock. Nodes are kept sorted in ascending
 * order; the list gives O(1) access to a node's left/right
 * neighbors as well as supporting sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size-segregated storage holds VAs of up to 1M in size. Each
 * pool is a list of ready-to-use VAs of one particular size; the pool
 * at index i in the pool array holds VAs of (i + 1) pages.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * Effective vmap-node logic: users make use of per-node structures
 * instead of one global heap, which balances access across CPUs and
 * mitigates lock contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * The initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}
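
/*
 * The address space is thus divided into vmap_zone_size-sized stripes
 * that rotate across the nodes: for example (illustrative), with
 * nr_vmap_nodes == 2 the first stripe maps to node 0, the second to
 * node 1, the third back to node 0, and so on.
 */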

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}

/*
 * We use the value 0 to represent "no node", which is why an encoded
 * value is the node-id incremented by 1. It is always greater than 0.
 * A valid node_id which can be encoded is [0:nr_vmap_nodes - 1]. If a
 * passed node_id is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns the node-id extracted from an encoded value; the valid
 * range is [0:nr_vmap_nodes - 1]. nr_vmap_nodes itself is returned
 * if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}
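
/*
 * Worked example (illustrative): with BITS_PER_BYTE == 8,
 * encode_vn_id(0) == 0x100 and decode_vn_id(0x100) == 0, whereas
 * decode_vn_id(0) wraps node_id to UINT_MAX, triggers no warning and
 * yields nr_vmap_nodes, i.e. "no node encoded".
 */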
96072210662SUladzislau Rezki (Sony)
96172210662SUladzislau Rezki (Sony) static bool
is_vn_id_valid(unsigned int node_id)96272210662SUladzislau Rezki (Sony) is_vn_id_valid(unsigned int node_id)
96372210662SUladzislau Rezki (Sony) {
96472210662SUladzislau Rezki (Sony) if (node_id < nr_vmap_nodes)
96572210662SUladzislau Rezki (Sony) return true;
96672210662SUladzislau Rezki (Sony)
96772210662SUladzislau Rezki (Sony) return false;
96872210662SUladzislau Rezki (Sony) }
96972210662SUladzislau Rezki (Sony)
97068ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
va_size(struct vmap_area * va)97168ad4a33SUladzislau Rezki (Sony) va_size(struct vmap_area *va)
97268ad4a33SUladzislau Rezki (Sony) {
97368ad4a33SUladzislau Rezki (Sony) return (va->va_end - va->va_start);
97468ad4a33SUladzislau Rezki (Sony) }
97568ad4a33SUladzislau Rezki (Sony)
97668ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
get_subtree_max_size(struct rb_node * node)97768ad4a33SUladzislau Rezki (Sony) get_subtree_max_size(struct rb_node *node)
97868ad4a33SUladzislau Rezki (Sony) {
97968ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
98068ad4a33SUladzislau Rezki (Sony)
98168ad4a33SUladzislau Rezki (Sony) va = rb_entry_safe(node, struct vmap_area, rb_node);
98268ad4a33SUladzislau Rezki (Sony) return va ? va->subtree_max_size : 0;
98368ad4a33SUladzislau Rezki (Sony) }
98468ad4a33SUladzislau Rezki (Sony)
985315cc066SMichel Lespinasse RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
986315cc066SMichel Lespinasse struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
98768ad4a33SUladzislau Rezki (Sony)
98877e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void);
98968ad4a33SUladzislau Rezki (Sony) static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
990690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work);
991690467c8SUladzislau Rezki (Sony) static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
992db64fe02SNick Piggin
99397105f0aSRoman Gushchin static atomic_long_t nr_vmalloc_pages;
99497105f0aSRoman Gushchin
vmalloc_nr_pages(void)99597105f0aSRoman Gushchin unsigned long vmalloc_nr_pages(void)
99697105f0aSRoman Gushchin {
99797105f0aSRoman Gushchin return atomic_long_read(&nr_vmalloc_pages);
99897105f0aSRoman Gushchin }
99997105f0aSRoman Gushchin
1000fc2c2269SUladzislau Rezki (Sony) static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
1001fc2c2269SUladzislau Rezki (Sony) {
1002fc2c2269SUladzislau Rezki (Sony) struct rb_node *n = root->rb_node;
1003fc2c2269SUladzislau Rezki (Sony)
1004fc2c2269SUladzislau Rezki (Sony) addr = (unsigned long)kasan_reset_tag((void *)addr);
1005fc2c2269SUladzislau Rezki (Sony)
1006fc2c2269SUladzislau Rezki (Sony) while (n) {
1007fc2c2269SUladzislau Rezki (Sony) struct vmap_area *va;
1008fc2c2269SUladzislau Rezki (Sony)
1009fc2c2269SUladzislau Rezki (Sony) va = rb_entry(n, struct vmap_area, rb_node);
1010fc2c2269SUladzislau Rezki (Sony) if (addr < va->va_start)
1011fc2c2269SUladzislau Rezki (Sony) n = n->rb_left;
1012fc2c2269SUladzislau Rezki (Sony) else if (addr >= va->va_end)
1013fc2c2269SUladzislau Rezki (Sony) n = n->rb_right;
1014fc2c2269SUladzislau Rezki (Sony) else
1015fc2c2269SUladzislau Rezki (Sony) return va;
1016fc2c2269SUladzislau Rezki (Sony) }
1017fc2c2269SUladzislau Rezki (Sony)
1018fc2c2269SUladzislau Rezki (Sony) return NULL;
1019fc2c2269SUladzislau Rezki (Sony) }
1020fc2c2269SUladzislau Rezki (Sony)
1021153090f2SBaoquan He /* Look up the first VA which satisfies addr < va_end, NULL if none. */
1022d0936029SUladzislau Rezki (Sony) static struct vmap_area *
102353becf32SUladzislau Rezki (Sony) __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
1024f181234aSChen Wandun {
1025f181234aSChen Wandun struct vmap_area *va = NULL;
1026d0936029SUladzislau Rezki (Sony) struct rb_node *n = root->rb_node;
1027f181234aSChen Wandun
10284aff1dc4SAndrey Konovalov addr = (unsigned long)kasan_reset_tag((void *)addr);
10294aff1dc4SAndrey Konovalov
1030f181234aSChen Wandun while (n) {
1031f181234aSChen Wandun struct vmap_area *tmp;
1032f181234aSChen Wandun
1033f181234aSChen Wandun tmp = rb_entry(n, struct vmap_area, rb_node);
1034f181234aSChen Wandun if (tmp->va_end > addr) {
1035f181234aSChen Wandun va = tmp;
1036f181234aSChen Wandun if (tmp->va_start <= addr)
1037f181234aSChen Wandun break;
1038f181234aSChen Wandun
1039f181234aSChen Wandun n = n->rb_left;
1040f181234aSChen Wandun } else
1041f181234aSChen Wandun n = n->rb_right;
1042f181234aSChen Wandun }
1043f181234aSChen Wandun
1044f181234aSChen Wandun return va;
1045f181234aSChen Wandun }
1046f181234aSChen Wandun
104753becf32SUladzislau Rezki (Sony) /*
104853becf32SUladzislau Rezki (Sony) * Returns the node where the first VA that satisfies addr < va_end resides.
104953becf32SUladzislau Rezki (Sony) * On success, the node is locked. The caller is responsible for unlocking it
105053becf32SUladzislau Rezki (Sony) * once the VA no longer needs to be accessed.
105153becf32SUladzislau Rezki (Sony) *
105253becf32SUladzislau Rezki (Sony) * Returns NULL if nothing found.
105353becf32SUladzislau Rezki (Sony) */
105453becf32SUladzislau Rezki (Sony) static struct vmap_node *
105553becf32SUladzislau Rezki (Sony) find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
105653becf32SUladzislau Rezki (Sony) {
1057fc2c2269SUladzislau Rezki (Sony) unsigned long va_start_lowest;
1058fc2c2269SUladzislau Rezki (Sony) struct vmap_node *vn;
105953becf32SUladzislau Rezki (Sony) int i;
106053becf32SUladzislau Rezki (Sony)
1061fc2c2269SUladzislau Rezki (Sony) repeat:
1062fc2c2269SUladzislau Rezki (Sony) for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
106353becf32SUladzislau Rezki (Sony) vn = &vmap_nodes[i];
106453becf32SUladzislau Rezki (Sony)
106553becf32SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
1066fc2c2269SUladzislau Rezki (Sony) *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
106753becf32SUladzislau Rezki (Sony)
1068fc2c2269SUladzislau Rezki (Sony) if (*va)
1069fc2c2269SUladzislau Rezki (Sony) if (!va_start_lowest || (*va)->va_start < va_start_lowest)
1070fc2c2269SUladzislau Rezki (Sony) va_start_lowest = (*va)->va_start;
107153becf32SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
107253becf32SUladzislau Rezki (Sony) }
107353becf32SUladzislau Rezki (Sony)
1074fc2c2269SUladzislau Rezki (Sony) /*
1075fc2c2269SUladzislau Rezki (Sony) * Check that the found VA still exists; it might have gone away. In that
1076fc2c2269SUladzislau Rezki (Sony) * case we repeat the search, because the VA has been removed concurrently
1077fc2c2269SUladzislau Rezki (Sony) * and we need to proceed to the next one. This is a rare case.
1078fc2c2269SUladzislau Rezki (Sony) */
1079fc2c2269SUladzislau Rezki (Sony) if (va_start_lowest) {
1080fc2c2269SUladzislau Rezki (Sony) vn = addr_to_node(va_start_lowest);
108153becf32SUladzislau Rezki (Sony)
1082fc2c2269SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
1083fc2c2269SUladzislau Rezki (Sony) *va = __find_vmap_area(va_start_lowest, &vn->busy.root);
1084db64fe02SNick Piggin
1085fc2c2269SUladzislau Rezki (Sony) if (*va)
1086fc2c2269SUladzislau Rezki (Sony) return vn;
10874aff1dc4SAndrey Konovalov
1088fc2c2269SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
1089fc2c2269SUladzislau Rezki (Sony) goto repeat;
1090db64fe02SNick Piggin }
1091db64fe02SNick Piggin
1092db64fe02SNick Piggin return NULL;
1093db64fe02SNick Piggin }
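/*
 * Usage sketch (illustrative): per the comment above, the caller owns
 * the returned node's busy lock and must drop it once done with the VA.
 */
#if 0
static void exceed_addr_lookup_example(unsigned long addr)
{
	struct vmap_area *va;
	struct vmap_node *vn;

	vn = find_vmap_area_exceed_addr_lock(addr, &va);
	if (vn) {
		/* Safe to inspect va->va_start/va->va_end here. */
		spin_unlock(&vn->busy.lock);
	}
}
#endif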
1094db64fe02SNick Piggin
109568ad4a33SUladzislau Rezki (Sony) /*
109668ad4a33SUladzislau Rezki (Sony) * This function returns the addresses of the parent node
109768ad4a33SUladzislau Rezki (Sony) * and of its left or right link for further processing.
10989c801f61SUladzislau Rezki (Sony) *
10999c801f61SUladzislau Rezki (Sony) * Otherwise NULL is returned. In that case all further
11009c801f61SUladzislau Rezki (Sony) * steps regarding insertion of the conflicting, overlapping
11019c801f61SUladzislau Rezki (Sony) * range have to be declined and are considered a bug.
110268ad4a33SUladzislau Rezki (Sony) */
110368ad4a33SUladzislau Rezki (Sony) static __always_inline struct rb_node **
110468ad4a33SUladzislau Rezki (Sony) find_va_links(struct vmap_area *va,
110568ad4a33SUladzislau Rezki (Sony) struct rb_root *root, struct rb_node *from,
110668ad4a33SUladzislau Rezki (Sony) struct rb_node **parent)
1107db64fe02SNick Piggin {
1108170168d0SNamhyung Kim struct vmap_area *tmp_va;
110968ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
1110db64fe02SNick Piggin
111168ad4a33SUladzislau Rezki (Sony) if (root) {
111268ad4a33SUladzislau Rezki (Sony) link = &root->rb_node;
111368ad4a33SUladzislau Rezki (Sony) if (unlikely(!*link)) {
111468ad4a33SUladzislau Rezki (Sony) *parent = NULL;
111568ad4a33SUladzislau Rezki (Sony) return link;
111668ad4a33SUladzislau Rezki (Sony) }
111768ad4a33SUladzislau Rezki (Sony) } else {
111868ad4a33SUladzislau Rezki (Sony) link = &from;
111968ad4a33SUladzislau Rezki (Sony) }
112068ad4a33SUladzislau Rezki (Sony)
112168ad4a33SUladzislau Rezki (Sony) /*
112268ad4a33SUladzislau Rezki (Sony) * Go to the bottom of the tree. When we hit the last point
112368ad4a33SUladzislau Rezki (Sony) * we end up with the parent rb_node and the correct direction,
112468ad4a33SUladzislau Rezki (Sony) * named "link" here, where the new va->rb_node will be attached.
112568ad4a33SUladzislau Rezki (Sony) */
112668ad4a33SUladzislau Rezki (Sony) do {
112768ad4a33SUladzislau Rezki (Sony) tmp_va = rb_entry(*link, struct vmap_area, rb_node);
112868ad4a33SUladzislau Rezki (Sony)
112968ad4a33SUladzislau Rezki (Sony) /*
113068ad4a33SUladzislau Rezki (Sony) * During the traversal we also do some sanity checks.
113168ad4a33SUladzislau Rezki (Sony) * Trigger a warning if partial (left/right) or full
113268ad4a33SUladzislau Rezki (Sony) * overlaps are detected.
113368ad4a33SUladzislau Rezki (Sony) */
1134753df96bSBaoquan He if (va->va_end <= tmp_va->va_start)
113568ad4a33SUladzislau Rezki (Sony) link = &(*link)->rb_left;
1136753df96bSBaoquan He else if (va->va_start >= tmp_va->va_end)
113768ad4a33SUladzislau Rezki (Sony) link = &(*link)->rb_right;
11389c801f61SUladzislau Rezki (Sony) else {
11399c801f61SUladzislau Rezki (Sony) WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
11409c801f61SUladzislau Rezki (Sony) va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
11419c801f61SUladzislau Rezki (Sony)
11429c801f61SUladzislau Rezki (Sony) return NULL;
11439c801f61SUladzislau Rezki (Sony) }
114468ad4a33SUladzislau Rezki (Sony) } while (*link);
114568ad4a33SUladzislau Rezki (Sony)
114668ad4a33SUladzislau Rezki (Sony) *parent = &tmp_va->rb_node;
114768ad4a33SUladzislau Rezki (Sony) return link;
1148db64fe02SNick Piggin }
1149db64fe02SNick Piggin
115068ad4a33SUladzislau Rezki (Sony) static __always_inline struct list_head *
115168ad4a33SUladzislau Rezki (Sony) get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
115268ad4a33SUladzislau Rezki (Sony) {
115368ad4a33SUladzislau Rezki (Sony) struct list_head *list;
1154db64fe02SNick Piggin
115568ad4a33SUladzislau Rezki (Sony) if (unlikely(!parent))
115668ad4a33SUladzislau Rezki (Sony) /*
115768ad4a33SUladzislau Rezki (Sony) * The red-black tree where we try to find VA neighbors
115868ad4a33SUladzislau Rezki (Sony) * before merging or inserting is empty, i.e. there is
115968ad4a33SUladzislau Rezki (Sony) * no free vmap space. Normally this does not happen,
116068ad4a33SUladzislau Rezki (Sony) * but we handle the case anyway.
116168ad4a33SUladzislau Rezki (Sony) */
116268ad4a33SUladzislau Rezki (Sony) return NULL;
116368ad4a33SUladzislau Rezki (Sony)
116468ad4a33SUladzislau Rezki (Sony) list = &rb_entry(parent, struct vmap_area, rb_node)->list;
116568ad4a33SUladzislau Rezki (Sony) return (&parent->rb_right == link ? list->next : list);
1166db64fe02SNick Piggin }
1167db64fe02SNick Piggin
116868ad4a33SUladzislau Rezki (Sony) static __always_inline void
11698eb510dbSUladzislau Rezki (Sony) __link_va(struct vmap_area *va, struct rb_root *root,
11708eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
11718eb510dbSUladzislau Rezki (Sony) struct list_head *head, bool augment)
117268ad4a33SUladzislau Rezki (Sony) {
117368ad4a33SUladzislau Rezki (Sony) /*
117468ad4a33SUladzislau Rezki (Sony) * VA is still not in the list, but we can
117568ad4a33SUladzislau Rezki (Sony) * identify its future previous list_head node.
117668ad4a33SUladzislau Rezki (Sony) */
117768ad4a33SUladzislau Rezki (Sony) if (likely(parent)) {
117868ad4a33SUladzislau Rezki (Sony) head = &rb_entry(parent, struct vmap_area, rb_node)->list;
117968ad4a33SUladzislau Rezki (Sony) if (&parent->rb_right != link)
118068ad4a33SUladzislau Rezki (Sony) head = head->prev;
118168ad4a33SUladzislau Rezki (Sony) }
1182db64fe02SNick Piggin
118368ad4a33SUladzislau Rezki (Sony) /* Insert to the rb-tree */
118468ad4a33SUladzislau Rezki (Sony) rb_link_node(&va->rb_node, parent, link);
11858eb510dbSUladzislau Rezki (Sony) if (augment) {
118668ad4a33SUladzislau Rezki (Sony) /*
118768ad4a33SUladzislau Rezki (Sony) * Some explanation here. Just perform a simple insertion
118868ad4a33SUladzislau Rezki (Sony) * into the tree. We do not set va->subtree_max_size to
118968ad4a33SUladzislau Rezki (Sony) * its current size before calling rb_insert_augmented().
1190153090f2SBaoquan He * That is because we populate the tree from the bottom
119168ad4a33SUladzislau Rezki (Sony) * up to the parent levels only when the node _is_ in the tree.
119268ad4a33SUladzislau Rezki (Sony) *
119368ad4a33SUladzislau Rezki (Sony) * Therefore we set subtree_max_size to zero after insertion,
119468ad4a33SUladzislau Rezki (Sony) * to let __augment_tree_propagate_from() put everything into
119568ad4a33SUladzislau Rezki (Sony) * the correct order later on.
119668ad4a33SUladzislau Rezki (Sony) */
119768ad4a33SUladzislau Rezki (Sony) rb_insert_augmented(&va->rb_node,
119868ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb);
119968ad4a33SUladzislau Rezki (Sony) va->subtree_max_size = 0;
120068ad4a33SUladzislau Rezki (Sony) } else {
120168ad4a33SUladzislau Rezki (Sony) rb_insert_color(&va->rb_node, root);
120268ad4a33SUladzislau Rezki (Sony) }
120368ad4a33SUladzislau Rezki (Sony)
120468ad4a33SUladzislau Rezki (Sony) /* Address-sort this list */
120568ad4a33SUladzislau Rezki (Sony) list_add(&va->list, head);
120668ad4a33SUladzislau Rezki (Sony) }
120768ad4a33SUladzislau Rezki (Sony)
120868ad4a33SUladzislau Rezki (Sony) static __always_inline void
12098eb510dbSUladzislau Rezki (Sony) link_va(struct vmap_area *va, struct rb_root *root,
12108eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
12118eb510dbSUladzislau Rezki (Sony) struct list_head *head)
12128eb510dbSUladzislau Rezki (Sony) {
12138eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, false);
12148eb510dbSUladzislau Rezki (Sony) }
12158eb510dbSUladzislau Rezki (Sony)
12168eb510dbSUladzislau Rezki (Sony) static __always_inline void
12178eb510dbSUladzislau Rezki (Sony) link_va_augment(struct vmap_area *va, struct rb_root *root,
12188eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
12198eb510dbSUladzislau Rezki (Sony) struct list_head *head)
12208eb510dbSUladzislau Rezki (Sony) {
12218eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, true);
12228eb510dbSUladzislau Rezki (Sony) }
12238eb510dbSUladzislau Rezki (Sony)
12248eb510dbSUladzislau Rezki (Sony) static __always_inline void
12258eb510dbSUladzislau Rezki (Sony) __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
122668ad4a33SUladzislau Rezki (Sony) {
1227460e42d1SUladzislau Rezki (Sony) if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1228460e42d1SUladzislau Rezki (Sony) return;
1229460e42d1SUladzislau Rezki (Sony)
12308eb510dbSUladzislau Rezki (Sony) if (augment)
123168ad4a33SUladzislau Rezki (Sony) rb_erase_augmented(&va->rb_node,
123268ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb);
123368ad4a33SUladzislau Rezki (Sony) else
123468ad4a33SUladzislau Rezki (Sony) rb_erase(&va->rb_node, root);
123568ad4a33SUladzislau Rezki (Sony)
12365d7a7c54SUladzislau Rezki (Sony) list_del_init(&va->list);
123768ad4a33SUladzislau Rezki (Sony) RB_CLEAR_NODE(&va->rb_node);
123868ad4a33SUladzislau Rezki (Sony) }
123968ad4a33SUladzislau Rezki (Sony)
12408eb510dbSUladzislau Rezki (Sony) static __always_inline void
12418eb510dbSUladzislau Rezki (Sony) unlink_va(struct vmap_area *va, struct rb_root *root)
12428eb510dbSUladzislau Rezki (Sony) {
12438eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, false);
12448eb510dbSUladzislau Rezki (Sony) }
12458eb510dbSUladzislau Rezki (Sony)
12468eb510dbSUladzislau Rezki (Sony) static __always_inline void
12478eb510dbSUladzislau Rezki (Sony) unlink_va_augment(struct vmap_area *va, struct rb_root *root)
12488eb510dbSUladzislau Rezki (Sony) {
12498eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, true);
12508eb510dbSUladzislau Rezki (Sony) }
12518eb510dbSUladzislau Rezki (Sony)
1252bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1253c3385e84SJiapeng Chong /*
1254c3385e84SJiapeng Chong * Gets called when removing a node or rotating.
1255c3385e84SJiapeng Chong */
1256c3385e84SJiapeng Chong static __always_inline unsigned long
1257c3385e84SJiapeng Chong compute_subtree_max_size(struct vmap_area *va)
1258c3385e84SJiapeng Chong {
1259c3385e84SJiapeng Chong return max3(va_size(va),
1260c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_left),
1261c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_right));
1262c3385e84SJiapeng Chong }
1263c3385e84SJiapeng Chong
1264bb850f4dSUladzislau Rezki (Sony) static void
1265da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check(void)
1266bb850f4dSUladzislau Rezki (Sony) {
1267bb850f4dSUladzislau Rezki (Sony) struct vmap_area *va;
1268da27c9edSUladzislau Rezki (Sony) unsigned long computed_size;
1269bb850f4dSUladzislau Rezki (Sony)
1270da27c9edSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) {
1271da27c9edSUladzislau Rezki (Sony) computed_size = compute_subtree_max_size(va);
1272da27c9edSUladzislau Rezki (Sony) if (computed_size != va->subtree_max_size)
1273bb850f4dSUladzislau Rezki (Sony) pr_emerg("tree is corrupted: %lu, %lu\n",
1274bb850f4dSUladzislau Rezki (Sony) va_size(va), va->subtree_max_size);
1275bb850f4dSUladzislau Rezki (Sony) }
1276bb850f4dSUladzislau Rezki (Sony) }
1277bb850f4dSUladzislau Rezki (Sony) #endif
1278bb850f4dSUladzislau Rezki (Sony)
127968ad4a33SUladzislau Rezki (Sony) /*
128068ad4a33SUladzislau Rezki (Sony) * This function populates subtree_max_size from the bottom to the
128168ad4a33SUladzislau Rezki (Sony) * upper levels, starting from the VA point. The propagation must be
128268ad4a33SUladzislau Rezki (Sony) * done when a VA's size is modified by changing its va_start/va_end,
128368ad4a33SUladzislau Rezki (Sony) * or when a new VA is inserted into the tree.
128468ad4a33SUladzislau Rezki (Sony) *
128568ad4a33SUladzislau Rezki (Sony) * It means that __augment_tree_propagate_from() must be called:
128668ad4a33SUladzislau Rezki (Sony) * - After VA has been inserted to the tree(free path);
128768ad4a33SUladzislau Rezki (Sony) * - After VA has been shrunk(allocation path);
128868ad4a33SUladzislau Rezki (Sony) * - After VA has been increased(merging path).
128968ad4a33SUladzislau Rezki (Sony) *
129068ad4a33SUladzislau Rezki (Sony) * Please note that, it does not mean that upper parent nodes
129168ad4a33SUladzislau Rezki (Sony) * and their subtree_max_size are recalculated all the time up
129268ad4a33SUladzislau Rezki (Sony) * to the root node.
129368ad4a33SUladzislau Rezki (Sony) *
129468ad4a33SUladzislau Rezki (Sony) * 4--8
129568ad4a33SUladzislau Rezki (Sony) * /\
129668ad4a33SUladzislau Rezki (Sony) * / \
129768ad4a33SUladzislau Rezki (Sony) * / \
129868ad4a33SUladzislau Rezki (Sony) * 2--2 8--8
129968ad4a33SUladzislau Rezki (Sony) *
130068ad4a33SUladzislau Rezki (Sony) * For example, if we modify node 4, shrinking it to 2, then
130168ad4a33SUladzislau Rezki (Sony) * no modification is required at all. If we shrink node 2 to 1,
130268ad4a33SUladzislau Rezki (Sony) * only its subtree_max_size is updated, and set to 1. If we shrink
130368ad4a33SUladzislau Rezki (Sony) * node 8 to 6, then its subtree_max_size is set to 6 and the parent
130468ad4a33SUladzislau Rezki (Sony) * node becomes 4--6.
130568ad4a33SUladzislau Rezki (Sony) */
130668ad4a33SUladzislau Rezki (Sony) static __always_inline void
130768ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(struct vmap_area *va)
130868ad4a33SUladzislau Rezki (Sony) {
130968ad4a33SUladzislau Rezki (Sony) /*
131015ae144fSUladzislau Rezki (Sony) * Populate the tree from bottom towards the root until
131115ae144fSUladzislau Rezki (Sony) * the calculated maximum available size of a checked node
131215ae144fSUladzislau Rezki (Sony) * is equal to its current one.
131368ad4a33SUladzislau Rezki (Sony) */
131415ae144fSUladzislau Rezki (Sony) free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1315bb850f4dSUladzislau Rezki (Sony)
1316bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1317da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check();
1318bb850f4dSUladzislau Rezki (Sony) #endif
131968ad4a33SUladzislau Rezki (Sony) }
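/*
 * Sketch (illustrative) of the "allocation path" case listed above:
 * shrinking a free VA in place and re-propagating subtree_max_size,
 * mirroring what the LE_FIT_TYPE branch of va_clip() does below.
 */
#if 0
static void shrink_and_propagate_example(struct vmap_area *va,
					 unsigned long size)
{
	va->va_start += size;			/* allocation took the left edge */
	augment_tree_propagate_from(va);	/* fix subtree_max_size upwards */
}
#endif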
132068ad4a33SUladzislau Rezki (Sony)
132168ad4a33SUladzislau Rezki (Sony) static void
132268ad4a33SUladzislau Rezki (Sony) insert_vmap_area(struct vmap_area *va,
132368ad4a33SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
132468ad4a33SUladzislau Rezki (Sony) {
132568ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
132668ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
132768ad4a33SUladzislau Rezki (Sony)
132868ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
13299c801f61SUladzislau Rezki (Sony) if (link)
133068ad4a33SUladzislau Rezki (Sony) link_va(va, root, parent, link, head);
133168ad4a33SUladzislau Rezki (Sony) }
133268ad4a33SUladzislau Rezki (Sony)
133368ad4a33SUladzislau Rezki (Sony) static void
133468ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(struct vmap_area *va,
133568ad4a33SUladzislau Rezki (Sony) struct rb_node *from, struct rb_root *root,
133668ad4a33SUladzislau Rezki (Sony) struct list_head *head)
133768ad4a33SUladzislau Rezki (Sony) {
133868ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
133968ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
134068ad4a33SUladzislau Rezki (Sony)
134168ad4a33SUladzislau Rezki (Sony) if (from)
134268ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, NULL, from, &parent);
134368ad4a33SUladzislau Rezki (Sony) else
134468ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
134568ad4a33SUladzislau Rezki (Sony)
13469c801f61SUladzislau Rezki (Sony) if (link) {
13478eb510dbSUladzislau Rezki (Sony) link_va_augment(va, root, parent, link, head);
134868ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
134968ad4a33SUladzislau Rezki (Sony) }
13509c801f61SUladzislau Rezki (Sony) }
135168ad4a33SUladzislau Rezki (Sony)
135268ad4a33SUladzislau Rezki (Sony) /*
135368ad4a33SUladzislau Rezki (Sony) * Merge a de-allocated chunk of VA memory with the previous
135468ad4a33SUladzislau Rezki (Sony) * and next free blocks. If no coalescing is done, a new
135568ad4a33SUladzislau Rezki (Sony) * free area is inserted. If the VA has been merged, it is
135668ad4a33SUladzislau Rezki (Sony) * freed.
13579c801f61SUladzislau Rezki (Sony) *
13589c801f61SUladzislau Rezki (Sony) * Please note, it can return NULL in case of overlapping
13599c801f61SUladzislau Rezki (Sony) * ranges, followed by a WARN() report. Despite being a
13609c801f61SUladzislau Rezki (Sony) * buggy behaviour, the system can stay alive and keep
13619c801f61SUladzislau Rezki (Sony) * going.
136268ad4a33SUladzislau Rezki (Sony) */
13633c5c3cfbSDaniel Axtens static __always_inline struct vmap_area *
13648eb510dbSUladzislau Rezki (Sony) __merge_or_add_vmap_area(struct vmap_area *va,
13658eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head, bool augment)
136668ad4a33SUladzislau Rezki (Sony) {
136768ad4a33SUladzislau Rezki (Sony) struct vmap_area *sibling;
136868ad4a33SUladzislau Rezki (Sony) struct list_head *next;
136968ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
137068ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
137168ad4a33SUladzislau Rezki (Sony) bool merged = false;
137268ad4a33SUladzislau Rezki (Sony)
137368ad4a33SUladzislau Rezki (Sony) /*
137468ad4a33SUladzislau Rezki (Sony) * Find a place in the tree where VA potentially will be
137568ad4a33SUladzislau Rezki (Sony) * inserted, unless it is merged with its sibling/siblings.
137668ad4a33SUladzislau Rezki (Sony) */
137768ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
13789c801f61SUladzislau Rezki (Sony) if (!link)
13799c801f61SUladzislau Rezki (Sony) return NULL;
138068ad4a33SUladzislau Rezki (Sony)
138168ad4a33SUladzislau Rezki (Sony) /*
138268ad4a33SUladzislau Rezki (Sony) * Get next node of VA to check if merging can be done.
138368ad4a33SUladzislau Rezki (Sony) */
138468ad4a33SUladzislau Rezki (Sony) next = get_va_next_sibling(parent, link);
138568ad4a33SUladzislau Rezki (Sony) if (unlikely(next == NULL))
138668ad4a33SUladzislau Rezki (Sony) goto insert;
138768ad4a33SUladzislau Rezki (Sony)
138868ad4a33SUladzislau Rezki (Sony) /*
138968ad4a33SUladzislau Rezki (Sony) * start end
139068ad4a33SUladzislau Rezki (Sony) * | |
139168ad4a33SUladzislau Rezki (Sony) * |<------VA------>|<-----Next----->|
139268ad4a33SUladzislau Rezki (Sony) * | |
139368ad4a33SUladzislau Rezki (Sony) * start end
139468ad4a33SUladzislau Rezki (Sony) */
139568ad4a33SUladzislau Rezki (Sony) if (next != head) {
139668ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next, struct vmap_area, list);
139768ad4a33SUladzislau Rezki (Sony) if (sibling->va_start == va->va_end) {
139868ad4a33SUladzislau Rezki (Sony) sibling->va_start = va->va_start;
139968ad4a33SUladzislau Rezki (Sony)
140068ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */
140168ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
140268ad4a33SUladzislau Rezki (Sony)
140368ad4a33SUladzislau Rezki (Sony) /* Point to the new merged area. */
140468ad4a33SUladzislau Rezki (Sony) va = sibling;
140568ad4a33SUladzislau Rezki (Sony) merged = true;
140668ad4a33SUladzislau Rezki (Sony) }
140768ad4a33SUladzislau Rezki (Sony) }
140868ad4a33SUladzislau Rezki (Sony)
140968ad4a33SUladzislau Rezki (Sony) /*
141068ad4a33SUladzislau Rezki (Sony) * start end
141168ad4a33SUladzislau Rezki (Sony) * | |
141268ad4a33SUladzislau Rezki (Sony) * |<-----Prev----->|<------VA------>|
141368ad4a33SUladzislau Rezki (Sony) * | |
141468ad4a33SUladzislau Rezki (Sony) * start end
141568ad4a33SUladzislau Rezki (Sony) */
141668ad4a33SUladzislau Rezki (Sony) if (next->prev != head) {
141768ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next->prev, struct vmap_area, list);
141868ad4a33SUladzislau Rezki (Sony) if (sibling->va_end == va->va_start) {
14195dd78640SUladzislau Rezki (Sony) /*
14205dd78640SUladzislau Rezki (Sony) * If both neighbors are coalesced, it is important
14215dd78640SUladzislau Rezki (Sony) * to unlink the "next" node first, followed by merging
14225dd78640SUladzislau Rezki (Sony) * with "previous" one. Otherwise the tree might not be
14235dd78640SUladzislau Rezki (Sony) * fully populated if a sibling's augmented value is
14245dd78640SUladzislau Rezki (Sony) * "normalized" because of rotation operations.
14255dd78640SUladzislau Rezki (Sony) */
142654f63d9dSUladzislau Rezki (Sony) if (merged)
14278eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, augment);
142868ad4a33SUladzislau Rezki (Sony)
14295dd78640SUladzislau Rezki (Sony) sibling->va_end = va->va_end;
14305dd78640SUladzislau Rezki (Sony)
143168ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */
143268ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
14333c5c3cfbSDaniel Axtens
14343c5c3cfbSDaniel Axtens /* Point to the new merged area. */
14353c5c3cfbSDaniel Axtens va = sibling;
14363c5c3cfbSDaniel Axtens merged = true;
143768ad4a33SUladzislau Rezki (Sony) }
143868ad4a33SUladzislau Rezki (Sony) }
143968ad4a33SUladzislau Rezki (Sony)
144068ad4a33SUladzislau Rezki (Sony) insert:
14415dd78640SUladzislau Rezki (Sony) if (!merged)
14428eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, augment);
14433c5c3cfbSDaniel Axtens
144496e2db45SUladzislau Rezki (Sony) return va;
144596e2db45SUladzislau Rezki (Sony) }
144696e2db45SUladzislau Rezki (Sony)
144796e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
14488eb510dbSUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va,
14498eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
14508eb510dbSUladzislau Rezki (Sony) {
14518eb510dbSUladzislau Rezki (Sony) return __merge_or_add_vmap_area(va, root, head, false);
14528eb510dbSUladzislau Rezki (Sony) }
14538eb510dbSUladzislau Rezki (Sony)
14548eb510dbSUladzislau Rezki (Sony) static __always_inline struct vmap_area *
145596e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va,
145696e2db45SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
145796e2db45SUladzislau Rezki (Sony) {
14588eb510dbSUladzislau Rezki (Sony) va = __merge_or_add_vmap_area(va, root, head, true);
145996e2db45SUladzislau Rezki (Sony) if (va)
14605dd78640SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
146196e2db45SUladzislau Rezki (Sony)
14623c5c3cfbSDaniel Axtens return va;
146368ad4a33SUladzislau Rezki (Sony) }
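/*
 * Sketch (illustrative): returning a VA of [0x2000:0x3000) to the free
 * tree when a free neighbour already starts at 0x3000. The merge path
 * extends that neighbour down to 0x2000 and frees the passed object, so
 * the returned pointer must be used instead of the original one. The
 * caller is expected to hold free_vmap_area_lock, as free_vmap_area()
 * does below.
 */
#if 0
static void merge_example(struct vmap_area *va)
{
	va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
					    &free_vmap_area_list);
	if (!va)
		pr_warn("overlapping range was not inserted\n");
}
#endif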
146468ad4a33SUladzislau Rezki (Sony)
146568ad4a33SUladzislau Rezki (Sony) static __always_inline bool
146668ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size,
146768ad4a33SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart)
146868ad4a33SUladzislau Rezki (Sony) {
146968ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr;
147068ad4a33SUladzislau Rezki (Sony)
147168ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart)
147268ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align);
147368ad4a33SUladzislau Rezki (Sony) else
147468ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align);
147568ad4a33SUladzislau Rezki (Sony)
147668ad4a33SUladzislau Rezki (Sony) /* Can be overflowed due to big size or alignment. */
147768ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size < nva_start_addr ||
147868ad4a33SUladzislau Rezki (Sony) nva_start_addr < vstart)
147968ad4a33SUladzislau Rezki (Sony) return false;
148068ad4a33SUladzislau Rezki (Sony)
148168ad4a33SUladzislau Rezki (Sony) return (nva_start_addr + size <= va->va_end);
148268ad4a33SUladzislau Rezki (Sony) }
148368ad4a33SUladzislau Rezki (Sony)
148468ad4a33SUladzislau Rezki (Sony) /*
148568ad4a33SUladzislau Rezki (Sony) * Find the first free block (lowest start address) in the tree
148668ad4a33SUladzislau Rezki (Sony) * that satisfies the request described by the passed parameters.
14879333fe98SUladzislau Rezki * Please note, with an alignment bigger than PAGE_SIZE, the
14889333fe98SUladzislau Rezki * search length is adjusted to account for the worst-case
14899333fe98SUladzislau Rezki * alignment overhead.
149068ad4a33SUladzislau Rezki (Sony) */
149168ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
1492f9863be4SUladzislau Rezki (Sony) find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1493f9863be4SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart, bool adjust_search_size)
149468ad4a33SUladzislau Rezki (Sony) {
149568ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
149668ad4a33SUladzislau Rezki (Sony) struct rb_node *node;
14979333fe98SUladzislau Rezki unsigned long length;
149868ad4a33SUladzislau Rezki (Sony)
149968ad4a33SUladzislau Rezki (Sony) /* Start from the root. */
1500f9863be4SUladzislau Rezki (Sony) node = root->rb_node;
150168ad4a33SUladzislau Rezki (Sony)
15029333fe98SUladzislau Rezki /* Adjust the search size for alignment overhead. */
15039333fe98SUladzislau Rezki length = adjust_search_size ? size + align - 1 : size;
15049333fe98SUladzislau Rezki
150568ad4a33SUladzislau Rezki (Sony) while (node) {
150668ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node);
150768ad4a33SUladzislau Rezki (Sony)
15089333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_left) >= length &&
150968ad4a33SUladzislau Rezki (Sony) vstart < va->va_start) {
151068ad4a33SUladzislau Rezki (Sony) node = node->rb_left;
151168ad4a33SUladzislau Rezki (Sony) } else {
151268ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart))
151368ad4a33SUladzislau Rezki (Sony) return va;
151468ad4a33SUladzislau Rezki (Sony)
151568ad4a33SUladzislau Rezki (Sony) /*
151668ad4a33SUladzislau Rezki (Sony) * It does not make sense to go deeper into the right
151768ad4a33SUladzislau Rezki (Sony) * sub-tree if it does not have a free block that is
15189333fe98SUladzislau Rezki * equal to or bigger than the requested search length.
151968ad4a33SUladzislau Rezki (Sony) */
15209333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length) {
152168ad4a33SUladzislau Rezki (Sony) node = node->rb_right;
152268ad4a33SUladzislau Rezki (Sony) continue;
152368ad4a33SUladzislau Rezki (Sony) }
152468ad4a33SUladzislau Rezki (Sony)
152568ad4a33SUladzislau Rezki (Sony) /*
15263806b041SAndrew Morton * OK. We roll back and find the first right sub-tree
152768ad4a33SUladzislau Rezki (Sony) * that satisfies the search criteria. This can happen
15289f531973SUladzislau Rezki (Sony) * due to the "vstart" restriction or an alignment overhead
15299f531973SUladzislau Rezki (Sony) * that is bigger than PAGE_SIZE.
153068ad4a33SUladzislau Rezki (Sony) */
153168ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) {
153268ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node);
153368ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart))
153468ad4a33SUladzislau Rezki (Sony) return va;
153568ad4a33SUladzislau Rezki (Sony)
15369333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length &&
153768ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) {
15389f531973SUladzislau Rezki (Sony) /*
15399f531973SUladzislau Rezki (Sony) * Shift the vstart forward. Please note, we update it with
15409f531973SUladzislau Rezki (Sony) * parent's start address adding "1" because we do not want
15419f531973SUladzislau Rezki (Sony) * to enter same sub-tree after it has already been checked
15429f531973SUladzislau Rezki (Sony) * and no suitable free block found there.
15439f531973SUladzislau Rezki (Sony) */
15449f531973SUladzislau Rezki (Sony) vstart = va->va_start + 1;
154568ad4a33SUladzislau Rezki (Sony) node = node->rb_right;
154668ad4a33SUladzislau Rezki (Sony) break;
154768ad4a33SUladzislau Rezki (Sony) }
154868ad4a33SUladzislau Rezki (Sony) }
154968ad4a33SUladzislau Rezki (Sony) }
155068ad4a33SUladzislau Rezki (Sony) }
155168ad4a33SUladzislau Rezki (Sony)
155268ad4a33SUladzislau Rezki (Sony) return NULL;
155368ad4a33SUladzislau Rezki (Sony) }
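/*
 * Worked example (illustrative) of the search-length adjustment: with
 * 4K pages, a block starting at 0x101000 and align = 4 * PAGE_SIZE,
 * ALIGN() rounds the start up to 0x104000, wasting 0x3000 bytes. The
 * waste is at most align - 1, hence the "size + align - 1" above.
 */
#if 0
static void search_length_example(void)
{
	unsigned long va_start = 0x101000;		/* page- but not 16K-aligned */
	unsigned long align = 4 * PAGE_SIZE;
	unsigned long nva = ALIGN(va_start, align);	/* 0x104000 */

	WARN_ON(nva - va_start > align - 1);
}
#endif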
155468ad4a33SUladzislau Rezki (Sony)
1555a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1556a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h>
1557a6cf4e0fSUladzislau Rezki (Sony)
1558a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area *
1559bd1264c3SSong Liu find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1560a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart)
1561a6cf4e0fSUladzislau Rezki (Sony) {
1562a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va;
1563a6cf4e0fSUladzislau Rezki (Sony)
1564bd1264c3SSong Liu list_for_each_entry(va, head, list) {
1565a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart))
1566a6cf4e0fSUladzislau Rezki (Sony) continue;
1567a6cf4e0fSUladzislau Rezki (Sony)
1568a6cf4e0fSUladzislau Rezki (Sony) return va;
1569a6cf4e0fSUladzislau Rezki (Sony) }
1570a6cf4e0fSUladzislau Rezki (Sony)
1571a6cf4e0fSUladzislau Rezki (Sony) return NULL;
1572a6cf4e0fSUladzislau Rezki (Sony) }
1573a6cf4e0fSUladzislau Rezki (Sony)
1574a6cf4e0fSUladzislau Rezki (Sony) static void
1575bd1264c3SSong Liu find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1576bd1264c3SSong Liu unsigned long size, unsigned long align)
1577a6cf4e0fSUladzislau Rezki (Sony) {
1578a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2;
1579a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart;
1580a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd;
1581a6cf4e0fSUladzislau Rezki (Sony)
1582a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd));
1583a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd;
1584a6cf4e0fSUladzislau Rezki (Sony)
1585bd1264c3SSong Liu va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1586bd1264c3SSong Liu va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1587a6cf4e0fSUladzislau Rezki (Sony)
1588a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2)
1589a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1590a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart);
1591a6cf4e0fSUladzislau Rezki (Sony) }
1592a6cf4e0fSUladzislau Rezki (Sony) #endif
1593a6cf4e0fSUladzislau Rezki (Sony)
159468ad4a33SUladzislau Rezki (Sony) enum fit_type {
159568ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0,
159668ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */
159768ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */
159868ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */
159968ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */
160068ad4a33SUladzislau Rezki (Sony) };
160168ad4a33SUladzislau Rezki (Sony)
160268ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type
160368ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va,
160468ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size)
160568ad4a33SUladzislau Rezki (Sony) {
160668ad4a33SUladzislau Rezki (Sony) enum fit_type type;
160768ad4a33SUladzislau Rezki (Sony)
160868ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */
160968ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start ||
161068ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end)
161168ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT;
161268ad4a33SUladzislau Rezki (Sony)
161368ad4a33SUladzislau Rezki (Sony) /* Now classify. */
161468ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) {
161568ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size)
161668ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE;
161768ad4a33SUladzislau Rezki (Sony) else
161868ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE;
161968ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) {
162068ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE;
162168ad4a33SUladzislau Rezki (Sony) } else {
162268ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE;
162368ad4a33SUladzislau Rezki (Sony) }
162468ad4a33SUladzislau Rezki (Sony)
162568ad4a33SUladzislau Rezki (Sony) return type;
162668ad4a33SUladzislau Rezki (Sony) }
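/*
 * Illustrative classification (assuming a free VA spanning
 * [0x1000:0x5000)) of the four fit types defined above.
 */
#if 0
static void fit_type_example(struct vmap_area *va)
{
	/* va->va_start == 0x1000, va->va_end == 0x5000 */
	WARN_ON(classify_va_fit_type(va, 0x1000, 0x4000) != FL_FIT_TYPE);
	WARN_ON(classify_va_fit_type(va, 0x1000, 0x1000) != LE_FIT_TYPE);
	WARN_ON(classify_va_fit_type(va, 0x4000, 0x1000) != RE_FIT_TYPE);
	WARN_ON(classify_va_fit_type(va, 0x2000, 0x1000) != NE_FIT_TYPE);
}
#endif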
162768ad4a33SUladzislau Rezki (Sony)
162868ad4a33SUladzislau Rezki (Sony) static __always_inline int
16295b75b8e1SUladzislau Rezki (Sony) va_clip(struct rb_root *root, struct list_head *head,
1630f9863be4SUladzislau Rezki (Sony) struct vmap_area *va, unsigned long nva_start_addr,
1631f9863be4SUladzislau Rezki (Sony) unsigned long size)
163268ad4a33SUladzislau Rezki (Sony) {
16332c929233SArnd Bergmann struct vmap_area *lva = NULL;
16341b23ff80SBaoquan He enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
163568ad4a33SUladzislau Rezki (Sony)
163668ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) {
163768ad4a33SUladzislau Rezki (Sony) /*
163868ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits.
163968ad4a33SUladzislau Rezki (Sony) *
164068ad4a33SUladzislau Rezki (Sony) * | |
164168ad4a33SUladzislau Rezki (Sony) * V NVA V
164268ad4a33SUladzislau Rezki (Sony) * |---------------|
164368ad4a33SUladzislau Rezki (Sony) */
1644f9863be4SUladzislau Rezki (Sony) unlink_va_augment(va, root);
164568ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
164668ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) {
164768ad4a33SUladzislau Rezki (Sony) /*
164868ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA.
164968ad4a33SUladzislau Rezki (Sony) *
165068ad4a33SUladzislau Rezki (Sony) * | |
165168ad4a33SUladzislau Rezki (Sony) * V NVA V R
165268ad4a33SUladzislau Rezki (Sony) * |-------|-------|
165368ad4a33SUladzislau Rezki (Sony) */
165468ad4a33SUladzislau Rezki (Sony) va->va_start += size;
165568ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) {
165668ad4a33SUladzislau Rezki (Sony) /*
165768ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA.
165868ad4a33SUladzislau Rezki (Sony) *
165968ad4a33SUladzislau Rezki (Sony) * | |
166068ad4a33SUladzislau Rezki (Sony) * L V NVA V
166168ad4a33SUladzislau Rezki (Sony) * |-------|-------|
166268ad4a33SUladzislau Rezki (Sony) */
166368ad4a33SUladzislau Rezki (Sony) va->va_end = nva_start_addr;
166468ad4a33SUladzislau Rezki (Sony) } else if (type == NE_FIT_TYPE) {
166568ad4a33SUladzislau Rezki (Sony) /*
166668ad4a33SUladzislau Rezki (Sony) * Split no edge of fit VA.
166768ad4a33SUladzislau Rezki (Sony) *
166868ad4a33SUladzislau Rezki (Sony) * | |
166968ad4a33SUladzislau Rezki (Sony) * L V NVA V R
167068ad4a33SUladzislau Rezki (Sony) * |---|-------|---|
167168ad4a33SUladzislau Rezki (Sony) */
167282dd23e8SUladzislau Rezki (Sony) lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
167382dd23e8SUladzislau Rezki (Sony) if (unlikely(!lva)) {
167482dd23e8SUladzislau Rezki (Sony) /*
167582dd23e8SUladzislau Rezki (Sony) * For the percpu allocator we do not do any pre-allocation
167682dd23e8SUladzislau Rezki (Sony) * and leave it as it is. The reason is that it most likely
167782dd23e8SUladzislau Rezki (Sony) * never ends up with NE_FIT_TYPE splitting. For percpu
167882dd23e8SUladzislau Rezki (Sony) * allocations, offsets and sizes are aligned to a fixed
167982dd23e8SUladzislau Rezki (Sony) * align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE are its
168082dd23e8SUladzislau Rezki (Sony) * main fitting cases.
168182dd23e8SUladzislau Rezki (Sony) *
168282dd23e8SUladzislau Rezki (Sony) * There are a few exceptions though; one example is the
168382dd23e8SUladzislau Rezki (Sony) * first allocation (early boot-up) when we have "one"
168482dd23e8SUladzislau Rezki (Sony) * big free space that has to be split.
1685060650a2SUladzislau Rezki (Sony) *
1686060650a2SUladzislau Rezki (Sony) * We can also hit this path for regular "vmap" allocations,
1687060650a2SUladzislau Rezki (Sony) * if "this" current CPU was not preloaded. See the comment
1688060650a2SUladzislau Rezki (Sony) * in alloc_vmap_area() for why. If so, GFP_NOWAIT is used
1689060650a2SUladzislau Rezki (Sony) * instead to get an extra object for splitting purposes.
1690060650a2SUladzislau Rezki (Sony) * That is rare and most of the time does not occur.
1691060650a2SUladzislau Rezki (Sony) *
1692060650a2SUladzislau Rezki (Sony) * What happens if the allocation fails? Basically, the
1693060650a2SUladzislau Rezki (Sony) * "overflow" path is triggered to purge lazily freed areas
1694060650a2SUladzislau Rezki (Sony) * and free some memory, and then the "retry" path is
1695060650a2SUladzislau Rezki (Sony) * triggered to repeat one more time. See more details in
1696060650a2SUladzislau Rezki (Sony) * the alloc_vmap_area() function.
169882dd23e8SUladzislau Rezki (Sony) */
169968ad4a33SUladzislau Rezki (Sony) lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
170082dd23e8SUladzislau Rezki (Sony) if (!lva)
170168ad4a33SUladzislau Rezki (Sony) return -1;
170282dd23e8SUladzislau Rezki (Sony) }
170368ad4a33SUladzislau Rezki (Sony)
170468ad4a33SUladzislau Rezki (Sony) /*
170568ad4a33SUladzislau Rezki (Sony) * Build the remainder.
170668ad4a33SUladzislau Rezki (Sony) */
170768ad4a33SUladzislau Rezki (Sony) lva->va_start = va->va_start;
170868ad4a33SUladzislau Rezki (Sony) lva->va_end = nva_start_addr;
170968ad4a33SUladzislau Rezki (Sony)
171068ad4a33SUladzislau Rezki (Sony) /*
171168ad4a33SUladzislau Rezki (Sony) * Shrink this VA to remaining size.
171268ad4a33SUladzislau Rezki (Sony) */
171368ad4a33SUladzislau Rezki (Sony) va->va_start = nva_start_addr + size;
171468ad4a33SUladzislau Rezki (Sony) } else {
171568ad4a33SUladzislau Rezki (Sony) return -1;
171668ad4a33SUladzislau Rezki (Sony) }
171768ad4a33SUladzislau Rezki (Sony)
171868ad4a33SUladzislau Rezki (Sony) if (type != FL_FIT_TYPE) {
171968ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
172068ad4a33SUladzislau Rezki (Sony)
17212c929233SArnd Bergmann if (lva) /* type == NE_FIT_TYPE */
1722f9863be4SUladzislau Rezki (Sony) insert_vmap_area_augment(lva, &va->rb_node, root, head);
172368ad4a33SUladzislau Rezki (Sony) }
172468ad4a33SUladzislau Rezki (Sony)
172568ad4a33SUladzislau Rezki (Sony) return 0;
172668ad4a33SUladzislau Rezki (Sony) }
172768ad4a33SUladzislau Rezki (Sony)
172838f6b9afSUladzislau Rezki (Sony) static unsigned long
172938f6b9afSUladzislau Rezki (Sony) va_alloc(struct vmap_area *va,
173038f6b9afSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head,
173138f6b9afSUladzislau Rezki (Sony) unsigned long size, unsigned long align,
173238f6b9afSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend)
173338f6b9afSUladzislau Rezki (Sony) {
173438f6b9afSUladzislau Rezki (Sony) unsigned long nva_start_addr;
173538f6b9afSUladzislau Rezki (Sony) int ret;
173638f6b9afSUladzislau Rezki (Sony)
173738f6b9afSUladzislau Rezki (Sony) if (va->va_start > vstart)
173838f6b9afSUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align);
173938f6b9afSUladzislau Rezki (Sony) else
174038f6b9afSUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align);
174138f6b9afSUladzislau Rezki (Sony)
174238f6b9afSUladzislau Rezki (Sony) /* Check the "vend" restriction. */
174338f6b9afSUladzislau Rezki (Sony) if (nva_start_addr + size > vend)
174438f6b9afSUladzislau Rezki (Sony) return vend;
174538f6b9afSUladzislau Rezki (Sony)
174638f6b9afSUladzislau Rezki (Sony) /* Update the free vmap_area. */
17475b75b8e1SUladzislau Rezki (Sony) ret = va_clip(root, head, va, nva_start_addr, size);
174838f6b9afSUladzislau Rezki (Sony) if (WARN_ON_ONCE(ret))
174938f6b9afSUladzislau Rezki (Sony) return vend;
175038f6b9afSUladzislau Rezki (Sony)
175138f6b9afSUladzislau Rezki (Sony) return nva_start_addr;
175238f6b9afSUladzislau Rezki (Sony) }
175338f6b9afSUladzislau Rezki (Sony)
175468ad4a33SUladzislau Rezki (Sony) /*
175568ad4a33SUladzislau Rezki (Sony) * Returns the start address of the newly allocated area on success.
175668ad4a33SUladzislau Rezki (Sony) * Otherwise vend is returned, which indicates failure.
175768ad4a33SUladzislau Rezki (Sony) */
175868ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
1759f9863be4SUladzislau Rezki (Sony) __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1760f9863be4SUladzislau Rezki (Sony) unsigned long size, unsigned long align,
1761cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend)
176268ad4a33SUladzislau Rezki (Sony) {
17639333fe98SUladzislau Rezki bool adjust_search_size = true;
176468ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr;
176568ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
176668ad4a33SUladzislau Rezki (Sony)
17679333fe98SUladzislau Rezki /*
17689333fe98SUladzislau Rezki * Do not adjust when:
17699333fe98SUladzislau Rezki * a) align <= PAGE_SIZE, because it does not make any sense.
17709333fe98SUladzislau Rezki * All blocks (their start addresses) are at least PAGE_SIZE
17719333fe98SUladzislau Rezki * aligned anyway;
17729333fe98SUladzislau Rezki * b) a short range where the requested size corresponds exactly
17739333fe98SUladzislau Rezki * to the specified [vstart:vend] interval and align > PAGE_SIZE.
17749333fe98SUladzislau Rezki * With an adjusted search length the allocation would not succeed.
17759333fe98SUladzislau Rezki */
17769333fe98SUladzislau Rezki if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
17779333fe98SUladzislau Rezki adjust_search_size = false;
17789333fe98SUladzislau Rezki
1779f9863be4SUladzislau Rezki (Sony) va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
178068ad4a33SUladzislau Rezki (Sony) if (unlikely(!va))
178168ad4a33SUladzislau Rezki (Sony) return vend;
178268ad4a33SUladzislau Rezki (Sony)
178338f6b9afSUladzislau Rezki (Sony) nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
178438f6b9afSUladzislau Rezki (Sony) if (nva_start_addr == vend)
178568ad4a33SUladzislau Rezki (Sony) return vend;
178668ad4a33SUladzislau Rezki (Sony)
1787a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1788bd1264c3SSong Liu find_vmap_lowest_match_check(root, head, size, align);
1789a6cf4e0fSUladzislau Rezki (Sony) #endif
1790a6cf4e0fSUladzislau Rezki (Sony)
179168ad4a33SUladzislau Rezki (Sony) return nva_start_addr;
179268ad4a33SUladzislau Rezki (Sony) }
17934da56b99SChris Wilson
1794db64fe02SNick Piggin /*
1795d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area
1796d98c9e83SAndrey Ryabinin */
1797d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va)
1798d98c9e83SAndrey Ryabinin {
1799d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(va->va_start);
1800d0936029SUladzislau Rezki (Sony)
1801d98c9e83SAndrey Ryabinin /*
1802d98c9e83SAndrey Ryabinin * Remove from the busy tree/list.
1803d98c9e83SAndrey Ryabinin */
1804d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
1805d0936029SUladzislau Rezki (Sony) unlink_va(va, &vn->busy.root);
1806d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
1807d98c9e83SAndrey Ryabinin
1808d98c9e83SAndrey Ryabinin /*
1809d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list.
1810d98c9e83SAndrey Ryabinin */
1811d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock);
181296e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1813d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock);
1814d98c9e83SAndrey Ryabinin }
1815d98c9e83SAndrey Ryabinin
1816187f8cc4SUladzislau Rezki (Sony) static inline void
1817187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1818187f8cc4SUladzislau Rezki (Sony) {
1819f56810c9SUros Bizjak struct vmap_area *va = NULL, *tmp;
1820187f8cc4SUladzislau Rezki (Sony)
1821187f8cc4SUladzislau Rezki (Sony) /*
1822187f8cc4SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. It is used
1823187f8cc4SUladzislau Rezki (Sony) * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1824187f8cc4SUladzislau Rezki (Sony) * that a CPU that does an allocation is preloaded.
1825187f8cc4SUladzislau Rezki (Sony) *
1826187f8cc4SUladzislau Rezki (Sony) * We do it in a non-atomic context, which allows us to use more
1827187f8cc4SUladzislau Rezki (Sony) * permissive allocation masks and thus be more stable under
1828187f8cc4SUladzislau Rezki (Sony) * low-memory conditions and high memory pressure.
1829187f8cc4SUladzislau Rezki (Sony) */
1830187f8cc4SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node))
1831187f8cc4SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1832187f8cc4SUladzislau Rezki (Sony)
1833187f8cc4SUladzislau Rezki (Sony) spin_lock(lock);
1834187f8cc4SUladzislau Rezki (Sony)
1835f56810c9SUros Bizjak tmp = NULL;
1836f56810c9SUros Bizjak if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
1837187f8cc4SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
1838187f8cc4SUladzislau Rezki (Sony) }
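/*
 * Usage sketch (illustrative): the preload-then-lock pattern as the
 * allocation paths use it. A later NE_FIT split can consume the
 * preloaded object via __this_cpu_xchg() in va_clip().
 */
#if 0
static void preload_pattern_example(gfp_t gfp_mask, int node)
{
	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
	/* ... allocate from the free tree under the lock ... */
	spin_unlock(&free_vmap_area_lock);
}
#endif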
1839187f8cc4SUladzislau Rezki (Sony)
184072210662SUladzislau Rezki (Sony) static struct vmap_pool *
184172210662SUladzislau Rezki (Sony) size_to_va_pool(struct vmap_node *vn, unsigned long size)
184272210662SUladzislau Rezki (Sony) {
184372210662SUladzislau Rezki (Sony) unsigned int idx = (size - 1) / PAGE_SIZE;
184472210662SUladzislau Rezki (Sony)
184572210662SUladzislau Rezki (Sony) if (idx < MAX_VA_SIZE_PAGES)
184672210662SUladzislau Rezki (Sony) return &vn->pool[idx];
184772210662SUladzislau Rezki (Sony)
184872210662SUladzislau Rezki (Sony) return NULL;
184972210662SUladzislau Rezki (Sony) }
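/*
 * Worked example (illustrative): the pool index is (size - 1) / PAGE_SIZE,
 * so a one-page VA maps to pool[0], a four-page VA to pool[3], and any
 * size above MAX_VA_SIZE_PAGES pages bypasses the per-node pools.
 */
#if 0
static void pool_index_example(struct vmap_node *vn)
{
	WARN_ON(size_to_va_pool(vn, PAGE_SIZE) != &vn->pool[0]);
	WARN_ON(size_to_va_pool(vn, 4 * PAGE_SIZE) != &vn->pool[3]);
	WARN_ON(size_to_va_pool(vn, (MAX_VA_SIZE_PAGES + 1) * PAGE_SIZE));
}
#endif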
185072210662SUladzislau Rezki (Sony)
185172210662SUladzislau Rezki (Sony) static bool
185272210662SUladzislau Rezki (Sony) node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
185372210662SUladzislau Rezki (Sony) {
185472210662SUladzislau Rezki (Sony) struct vmap_pool *vp;
185572210662SUladzislau Rezki (Sony)
185672210662SUladzislau Rezki (Sony) vp = size_to_va_pool(n, va_size(va));
185772210662SUladzislau Rezki (Sony) if (!vp)
185872210662SUladzislau Rezki (Sony) return false;
185972210662SUladzislau Rezki (Sony)
186072210662SUladzislau Rezki (Sony) spin_lock(&n->pool_lock);
186172210662SUladzislau Rezki (Sony) list_add(&va->list, &vp->head);
186272210662SUladzislau Rezki (Sony) WRITE_ONCE(vp->len, vp->len + 1);
186372210662SUladzislau Rezki (Sony) spin_unlock(&n->pool_lock);
186472210662SUladzislau Rezki (Sony)
186572210662SUladzislau Rezki (Sony) return true;
186672210662SUladzislau Rezki (Sony) }
186772210662SUladzislau Rezki (Sony)
186872210662SUladzislau Rezki (Sony) static struct vmap_area *
186972210662SUladzislau Rezki (Sony) node_pool_del_va(struct vmap_node *vn, unsigned long size,
187072210662SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart,
187172210662SUladzislau Rezki (Sony) unsigned long vend)
187272210662SUladzislau Rezki (Sony) {
187372210662SUladzislau Rezki (Sony) struct vmap_area *va = NULL;
187472210662SUladzislau Rezki (Sony) struct vmap_pool *vp;
187572210662SUladzislau Rezki (Sony) int err = 0;
187672210662SUladzislau Rezki (Sony)
187772210662SUladzislau Rezki (Sony) vp = size_to_va_pool(vn, size);
187872210662SUladzislau Rezki (Sony) if (!vp || list_empty(&vp->head))
187972210662SUladzislau Rezki (Sony) return NULL;
188072210662SUladzislau Rezki (Sony)
188172210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock);
188272210662SUladzislau Rezki (Sony) if (!list_empty(&vp->head)) {
188372210662SUladzislau Rezki (Sony) va = list_first_entry(&vp->head, struct vmap_area, list);
188472210662SUladzislau Rezki (Sony)
188572210662SUladzislau Rezki (Sony) if (IS_ALIGNED(va->va_start, align)) {
188672210662SUladzislau Rezki (Sony) /*
188772210662SUladzislau Rezki (Sony) * Do some sanity checks and emit a warning
188872210662SUladzislau Rezki (Sony) * if one of the checks below detects an error.
188972210662SUladzislau Rezki (Sony) */
189072210662SUladzislau Rezki (Sony) err |= (va_size(va) != size);
189172210662SUladzislau Rezki (Sony) err |= (va->va_start < vstart);
189272210662SUladzislau Rezki (Sony) err |= (va->va_end > vend);
189372210662SUladzislau Rezki (Sony)
189472210662SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(err)) {
189572210662SUladzislau Rezki (Sony) list_del_init(&va->list);
189672210662SUladzislau Rezki (Sony) WRITE_ONCE(vp->len, vp->len - 1);
189772210662SUladzislau Rezki (Sony) } else {
189872210662SUladzislau Rezki (Sony) va = NULL;
189972210662SUladzislau Rezki (Sony) }
190072210662SUladzislau Rezki (Sony) } else {
190172210662SUladzislau Rezki (Sony) list_move_tail(&va->list, &vp->head);
190272210662SUladzislau Rezki (Sony) va = NULL;
190372210662SUladzislau Rezki (Sony) }
190472210662SUladzislau Rezki (Sony) }
190572210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock);
190672210662SUladzislau Rezki (Sony)
190772210662SUladzislau Rezki (Sony) return va;
190872210662SUladzislau Rezki (Sony) }
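
/*
 * Worked pool-indexing example (illustrative numbers; this assumes
 * size_to_va_pool() above maps one pool per whole-page VA size):
 * with 4 KiB pages, an 8 KiB VA is cached in vn->pool[1] by
 * node_pool_add_va(), so a later 8 KiB request on the same node can
 * be served by node_pool_del_va() without touching the global free
 * tree.
 */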
190972210662SUladzislau Rezki (Sony)
191072210662SUladzislau Rezki (Sony) static struct vmap_area *
191172210662SUladzislau Rezki (Sony) node_alloc(unsigned long size, unsigned long align,
191272210662SUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend,
191372210662SUladzislau Rezki (Sony) unsigned long *addr, unsigned int *vn_id)
191472210662SUladzislau Rezki (Sony) {
191572210662SUladzislau Rezki (Sony) struct vmap_area *va;
191672210662SUladzislau Rezki (Sony)
191772210662SUladzislau Rezki (Sony) *vn_id = 0;
191872210662SUladzislau Rezki (Sony) *addr = vend;
191972210662SUladzislau Rezki (Sony)
192072210662SUladzislau Rezki (Sony) /*
192172210662SUladzislau Rezki (Sony) * Fall back to the global heap if the request is not for
192272210662SUladzislau Rezki (Sony) * the vmalloc range or there is only one node.
192372210662SUladzislau Rezki (Sony) */
192472210662SUladzislau Rezki (Sony) if (vstart != VMALLOC_START || vend != VMALLOC_END ||
192572210662SUladzislau Rezki (Sony) nr_vmap_nodes == 1)
192672210662SUladzislau Rezki (Sony) return NULL;
192772210662SUladzislau Rezki (Sony)
192872210662SUladzislau Rezki (Sony) *vn_id = raw_smp_processor_id() % nr_vmap_nodes;
192972210662SUladzislau Rezki (Sony) va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
193072210662SUladzislau Rezki (Sony) *vn_id = encode_vn_id(*vn_id);
193172210662SUladzislau Rezki (Sony)
193272210662SUladzislau Rezki (Sony) if (va)
193372210662SUladzislau Rezki (Sony) *addr = va->va_start;
193472210662SUladzislau Rezki (Sony)
193572210662SUladzislau Rezki (Sony) return va;
193672210662SUladzislau Rezki (Sony) }
193772210662SUladzislau Rezki (Sony)
1938aaab830aSrulinhuang static inline void setup_vmalloc_vm(struct vm_struct *vm,
1939aaab830aSrulinhuang struct vmap_area *va, unsigned long flags, const void *caller)
1940aaab830aSrulinhuang {
1941aaab830aSrulinhuang vm->flags = flags;
1942aaab830aSrulinhuang vm->addr = (void *)va->va_start;
1943a0309fafSKees Cook vm->size = vm->requested_size = va_size(va);
1944aaab830aSrulinhuang vm->caller = caller;
1945aaab830aSrulinhuang va->vm = vm;
1946aaab830aSrulinhuang }
1947aaab830aSrulinhuang
1948d98c9e83SAndrey Ryabinin /*
1949db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the
1950aaab830aSrulinhuang * range [vstart, vend). If vm is passed in, the VA and vm will also be bound.
1951db64fe02SNick Piggin */
1952db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size,
1953db64fe02SNick Piggin unsigned long align,
1954db64fe02SNick Piggin unsigned long vstart, unsigned long vend,
1955869176a0SBaoquan He int node, gfp_t gfp_mask,
19564b68a773SBaoquan He unsigned long va_flags, struct vm_struct *vm)
1957db64fe02SNick Piggin {
1958d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
1959187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va;
196012e376a6SUladzislau Rezki (Sony) unsigned long freed;
19611da177e4SLinus Torvalds unsigned long addr;
196272210662SUladzislau Rezki (Sony) unsigned int vn_id;
1963db64fe02SNick Piggin int purged = 0;
1964d98c9e83SAndrey Ryabinin int ret;
1965db64fe02SNick Piggin
19667e4a32c0SHyunmin Lee if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
19677e4a32c0SHyunmin Lee return ERR_PTR(-EINVAL);
1968db64fe02SNick Piggin
196968ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized))
197068ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY);
197168ad4a33SUladzislau Rezki (Sony)
19725803ed29SChristoph Hellwig might_sleep();
197372210662SUladzislau Rezki (Sony)
197472210662SUladzislau Rezki (Sony) /*
197572210662SUladzislau Rezki (Sony) * If a VA is obtained from the global heap (i.e. if the
197672210662SUladzislau Rezki (Sony) * per-node attempt below fails) it is still marked with this
197772210662SUladzislau Rezki (Sony) * "vn_id", so it is returned to this node's pool later. This
197872210662SUladzislau Rezki (Sony) * makes it possible to populate pools based on user demand.
197972210662SUladzislau Rezki (Sony) *
198072210662SUladzislau Rezki (Sony) * On success a ready-to-go VA is returned.
198172210662SUladzislau Rezki (Sony) */
198272210662SUladzislau Rezki (Sony) va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
198372210662SUladzislau Rezki (Sony) if (!va) {
1984f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
19854da56b99SChris Wilson
1986f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1987db64fe02SNick Piggin if (unlikely(!va))
1988db64fe02SNick Piggin return ERR_PTR(-ENOMEM);
1989db64fe02SNick Piggin
19907f88f88fSCatalin Marinas /*
19917f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects
19927f88f88fSCatalin Marinas * to avoid false negatives.
19937f88f88fSCatalin Marinas */
1994f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
199596aa8437SUladzislau Rezki (Sony) }
19967f88f88fSCatalin Marinas
1997db64fe02SNick Piggin retry:
199872210662SUladzislau Rezki (Sony) if (addr == vend) {
1999187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
2000f9863be4SUladzislau Rezki (Sony) addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
2001f9863be4SUladzislau Rezki (Sony) size, align, vstart, vend);
2002187f8cc4SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
200372210662SUladzislau Rezki (Sony) }
200468ad4a33SUladzislau Rezki (Sony)
2005cf243da6SUladzislau Rezki (Sony) trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
2006cf243da6SUladzislau Rezki (Sony)
200789699605SNick Piggin /*
200868ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is
200968ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path.
201089699605SNick Piggin */
201168ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend))
201289699605SNick Piggin goto overflow;
201389699605SNick Piggin
201489699605SNick Piggin va->va_start = addr;
201589699605SNick Piggin va->va_end = addr + size;
2016688fcbfcSPengfei Li va->vm = NULL;
201772210662SUladzislau Rezki (Sony) va->flags = (va_flags | vn_id);
201868ad4a33SUladzislau Rezki (Sony)
20194b68a773SBaoquan He if (vm) {
20204b68a773SBaoquan He vm->addr = (void *)va->va_start;
2021b44f71e3SZhangPeng vm->size = va_size(va);
20224b68a773SBaoquan He va->vm = vm;
20234b68a773SBaoquan He }
2024aaab830aSrulinhuang
2025d0936029SUladzislau Rezki (Sony) vn = addr_to_node(va->va_start);
2026d0936029SUladzislau Rezki (Sony)
2027d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
2028d0936029SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2029d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
203089699605SNick Piggin
203161e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align));
203289699605SNick Piggin BUG_ON(va->va_start < vstart);
203389699605SNick Piggin BUG_ON(va->va_end > vend);
203489699605SNick Piggin
2035d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size);
2036d98c9e83SAndrey Ryabinin if (ret) {
2037d98c9e83SAndrey Ryabinin free_vmap_area(va);
2038d98c9e83SAndrey Ryabinin return ERR_PTR(ret);
2039d98c9e83SAndrey Ryabinin }
2040d98c9e83SAndrey Ryabinin
204189699605SNick Piggin return va;
204289699605SNick Piggin
20437766970cSNick Piggin overflow:
2044db64fe02SNick Piggin if (!purged) {
204577e50af0SThomas Gleixner reclaim_and_purge_vmap_areas();
2046db64fe02SNick Piggin purged = 1;
2047db64fe02SNick Piggin goto retry;
2048db64fe02SNick Piggin }
20494da56b99SChris Wilson
205012e376a6SUladzislau Rezki (Sony) freed = 0;
20514da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
205212e376a6SUladzislau Rezki (Sony)
20534da56b99SChris Wilson if (freed > 0) {
20544da56b99SChris Wilson purged = 0;
20554da56b99SChris Wilson goto retry;
20564da56b99SChris Wilson }
20574da56b99SChris Wilson
205803497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
205955ccad6fSShubhang Kaushik OS pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
206055ccad6fSShubhang Kaushik OS size, vstart, vend);
206168ad4a33SUladzislau Rezki (Sony)
206268ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
2063db64fe02SNick Piggin return ERR_PTR(-EBUSY);
2064db64fe02SNick Piggin }
2065db64fe02SNick Piggin
20664da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb)
20674da56b99SChris Wilson {
20684da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb);
20694da56b99SChris Wilson }
20704da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
20714da56b99SChris Wilson
20724da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb)
20734da56b99SChris Wilson {
20744da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
20754da56b99SChris Wilson }
20764da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
20774da56b99SChris Wilson
2078db64fe02SNick Piggin /*
2079db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up
2080db64fe02SNick Piggin * before attempting to purge with a TLB flush.
2081db64fe02SNick Piggin *
2082db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables
2083db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of
2084db64fe02SNick Piggin * global TLB flushes that must be performed. It would seem natural to scale
2085db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity
2086db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely
2087db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean
2088db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be
2089db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with
2090db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old
2091db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it
2092db64fe02SNick Piggin * becomes a problem on bigger systems.
2093db64fe02SNick Piggin */
2094db64fe02SNick Piggin static unsigned long lazy_max_pages(void)
2095db64fe02SNick Piggin {
2096db64fe02SNick Piggin unsigned int log;
2097db64fe02SNick Piggin
2098db64fe02SNick Piggin log = fls(num_online_cpus());
2099db64fe02SNick Piggin
2100db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE);
2101db64fe02SNick Piggin }
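
/*
 * A worked example of the log scale above, assuming 4 KiB pages
 * (PAGE_SIZE == 4096 is an assumption for illustration only):
 *
 *   num_online_cpus() == 16  ->  fls(16) == 5
 *   lazy_max_pages()  == 5 * (32UL * 1024 * 1024 / 4096)
 *                     == 5 * 8192 == 40960 pages (~160 MiB of VA)
 *
 * Doubling the CPU count adds only one more 32 MiB step, which is
 * the conservative growth described in the comment above.
 */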
2102db64fe02SNick Piggin
21034d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
2104db64fe02SNick Piggin
21050574ecd1SChristoph Hellwig /*
2106f0953a1bSIngo Molnar * Serialize vmap purging. There is no actual critical section protected
2107153090f2SBaoquan He * by this lock, but we want to avoid concurrent calls for performance
21080574ecd1SChristoph Hellwig * reasons and to make pcpu_get_vm_areas() more deterministic.
21090574ecd1SChristoph Hellwig */
2110f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock);
21110574ecd1SChristoph Hellwig
211202b709dfSNick Piggin /* for per-CPU blocks */
211302b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void);
2114282631cbSUladzislau Rezki (Sony) static cpumask_t purge_nodes;
211502b709dfSNick Piggin
211672210662SUladzislau Rezki (Sony) static void
211772210662SUladzislau Rezki (Sony) reclaim_list_global(struct list_head *head)
2118db64fe02SNick Piggin {
211972210662SUladzislau Rezki (Sony) struct vmap_area *va, *n;
2120db64fe02SNick Piggin
212172210662SUladzislau Rezki (Sony) if (list_empty(head))
212272210662SUladzislau Rezki (Sony) return;
2123db64fe02SNick Piggin
2124e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock);
212572210662SUladzislau Rezki (Sony) list_for_each_entry_safe(va, n, head, list)
212672210662SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va,
212772210662SUladzislau Rezki (Sony) &free_vmap_area_root, &free_vmap_area_list);
212872210662SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
212972210662SUladzislau Rezki (Sony) }
213072210662SUladzislau Rezki (Sony)
213172210662SUladzislau Rezki (Sony) static void
213272210662SUladzislau Rezki (Sony) decay_va_pool_node(struct vmap_node *vn, bool full_decay)
213372210662SUladzislau Rezki (Sony) {
21347ae12a57SHongbo Li LIST_HEAD(decay_list);
21357ae12a57SHongbo Li struct rb_root decay_root = RB_ROOT;
213672210662SUladzislau Rezki (Sony) struct vmap_area *va, *nva;
213772210662SUladzislau Rezki (Sony) unsigned long n_decay;
213872210662SUladzislau Rezki (Sony) int i;
213972210662SUladzislau Rezki (Sony)
214072210662SUladzislau Rezki (Sony) for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
21417ae12a57SHongbo Li LIST_HEAD(tmp_list);
214272210662SUladzislau Rezki (Sony)
214372210662SUladzislau Rezki (Sony) if (list_empty(&vn->pool[i].head))
214472210662SUladzislau Rezki (Sony) continue;
214572210662SUladzislau Rezki (Sony)
214672210662SUladzislau Rezki (Sony) /* Detach the pool, so that no one else can access it. */
214772210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock);
214872210662SUladzislau Rezki (Sony) list_replace_init(&vn->pool[i].head, &tmp_list);
214972210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock);
215072210662SUladzislau Rezki (Sony)
215172210662SUladzislau Rezki (Sony) if (full_decay)
215272210662SUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, 0);
215372210662SUladzislau Rezki (Sony)
215472210662SUladzislau Rezki (Sony) /* Decay the pool by ~25% of its remaining objects. */
215572210662SUladzislau Rezki (Sony) n_decay = vn->pool[i].len >> 2;
215672210662SUladzislau Rezki (Sony)
215772210662SUladzislau Rezki (Sony) list_for_each_entry_safe(va, nva, &tmp_list, list) {
215872210662SUladzislau Rezki (Sony) list_del_init(&va->list);
215972210662SUladzislau Rezki (Sony) merge_or_add_vmap_area(va, &decay_root, &decay_list);
216072210662SUladzislau Rezki (Sony)
216172210662SUladzislau Rezki (Sony) if (!full_decay) {
216272210662SUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
216372210662SUladzislau Rezki (Sony)
216472210662SUladzislau Rezki (Sony) if (!--n_decay)
216572210662SUladzislau Rezki (Sony) break;
216672210662SUladzislau Rezki (Sony) }
216772210662SUladzislau Rezki (Sony) }
216872210662SUladzislau Rezki (Sony)
216915e02a39SUladzislau Rezki (Sony) /*
217015e02a39SUladzislau Rezki (Sony) * Reattach the pool if it has only been partly decayed.
217115e02a39SUladzislau Rezki (Sony) * Please note, no other context can populate the pool
217215e02a39SUladzislau Rezki (Sony) * while it is detached, therefore a simple list replace
217315e02a39SUladzislau Rezki (Sony) * operation is sufficient here.
217415e02a39SUladzislau Rezki (Sony) */
217572210662SUladzislau Rezki (Sony) if (!full_decay && !list_empty(&tmp_list)) {
217672210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock);
217772210662SUladzislau Rezki (Sony) list_replace_init(&tmp_list, &vn->pool[i].head);
217872210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock);
217972210662SUladzislau Rezki (Sony) }
218072210662SUladzislau Rezki (Sony) }
218172210662SUladzislau Rezki (Sony)
218272210662SUladzislau Rezki (Sony) reclaim_list_global(&decay_list);
218372210662SUladzislau Rezki (Sony) }
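
/*
 * Partial-decay example (illustrative): a pool with len == 8 gives
 * n_decay == 8 >> 2 == 2, so two areas are merged back into the
 * global free tree via the decay list and the remaining six are
 * reattached to the pool. With full_decay the whole pool drains.
 */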
218472210662SUladzislau Rezki (Sony)
21859e9e085eSAdrian Huang static void
21869e9e085eSAdrian Huang kasan_release_vmalloc_node(struct vmap_node *vn)
21879e9e085eSAdrian Huang {
21889e9e085eSAdrian Huang struct vmap_area *va;
21899e9e085eSAdrian Huang unsigned long start, end;
21909e9e085eSAdrian Huang
21919e9e085eSAdrian Huang start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
21929e9e085eSAdrian Huang end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
21939e9e085eSAdrian Huang
21949e9e085eSAdrian Huang list_for_each_entry(va, &vn->purge_list, list) {
21959e9e085eSAdrian Huang if (is_vmalloc_or_module_addr((void *) va->va_start))
21969e9e085eSAdrian Huang kasan_release_vmalloc(va->va_start, va->va_end,
21979e9e085eSAdrian Huang va->va_start, va->va_end,
21989e9e085eSAdrian Huang KASAN_VMALLOC_PAGE_RANGE);
21999e9e085eSAdrian Huang }
22009e9e085eSAdrian Huang
22019e9e085eSAdrian Huang kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
22029e9e085eSAdrian Huang }
22039e9e085eSAdrian Huang
220472210662SUladzislau Rezki (Sony) static void purge_vmap_node(struct work_struct *work)
220572210662SUladzislau Rezki (Sony) {
220672210662SUladzislau Rezki (Sony) struct vmap_node *vn = container_of(work,
220772210662SUladzislau Rezki (Sony) struct vmap_node, purge_work);
2208409faf8cSAdrian Huang unsigned long nr_purged_pages = 0;
220972210662SUladzislau Rezki (Sony) struct vmap_area *va, *n_va;
221072210662SUladzislau Rezki (Sony) LIST_HEAD(local_list);
221172210662SUladzislau Rezki (Sony)
22129e9e085eSAdrian Huang if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
22139e9e085eSAdrian Huang kasan_release_vmalloc_node(vn);
22149e9e085eSAdrian Huang
221572210662SUladzislau Rezki (Sony) vn->nr_purged = 0;
221672210662SUladzislau Rezki (Sony)
2217282631cbSUladzislau Rezki (Sony) list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2218b44f71e3SZhangPeng unsigned long nr = va_size(va) >> PAGE_SHIFT;
221972210662SUladzislau Rezki (Sony) unsigned int vn_id = decode_vn_id(va->flags);
2220763b218dSJoel Fernandes
222172210662SUladzislau Rezki (Sony) list_del_init(&va->list);
22229c801f61SUladzislau Rezki (Sony)
2223409faf8cSAdrian Huang nr_purged_pages += nr;
222472210662SUladzislau Rezki (Sony) vn->nr_purged++;
222568571be9SUladzislau Rezki (Sony)
222672210662SUladzislau Rezki (Sony) if (is_vn_id_valid(vn_id) && !vn->skip_populate)
222772210662SUladzislau Rezki (Sony) if (node_pool_add_va(vn, va))
222872210662SUladzislau Rezki (Sony) continue;
222972210662SUladzislau Rezki (Sony)
223072210662SUladzislau Rezki (Sony) /* Go back to global. */
223172210662SUladzislau Rezki (Sony) list_add(&va->list, &local_list);
2232763b218dSJoel Fernandes }
22336030fd5fSUladzislau Rezki (Sony)
2234409faf8cSAdrian Huang atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
2235409faf8cSAdrian Huang
223672210662SUladzislau Rezki (Sony) reclaim_list_global(&local_list);
2237282631cbSUladzislau Rezki (Sony) }
2238282631cbSUladzislau Rezki (Sony)
2239282631cbSUladzislau Rezki (Sony) /*
2240282631cbSUladzislau Rezki (Sony) * Purges all lazily-freed vmap areas.
2241282631cbSUladzislau Rezki (Sony) */
224272210662SUladzislau Rezki (Sony) static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
224372210662SUladzislau Rezki (Sony) bool full_pool_decay)
2244282631cbSUladzislau Rezki (Sony) {
224572210662SUladzislau Rezki (Sony) unsigned long nr_purged_areas = 0;
224672210662SUladzislau Rezki (Sony) unsigned int nr_purge_helpers;
224772210662SUladzislau Rezki (Sony) unsigned int nr_purge_nodes;
2248282631cbSUladzislau Rezki (Sony) struct vmap_node *vn;
2249282631cbSUladzislau Rezki (Sony) int i;
2250282631cbSUladzislau Rezki (Sony)
2251282631cbSUladzislau Rezki (Sony) lockdep_assert_held(&vmap_purge_lock);
225272210662SUladzislau Rezki (Sony)
225372210662SUladzislau Rezki (Sony) /*
225472210662SUladzislau Rezki (Sony) * Use a cpumask to mark which nodes have to be processed.
225572210662SUladzislau Rezki (Sony) */
2256282631cbSUladzislau Rezki (Sony) purge_nodes = CPU_MASK_NONE;
2257282631cbSUladzislau Rezki (Sony)
2258282631cbSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) {
2259282631cbSUladzislau Rezki (Sony) vn = &vmap_nodes[i];
2260282631cbSUladzislau Rezki (Sony)
2261282631cbSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->purge_list);
226272210662SUladzislau Rezki (Sony) vn->skip_populate = full_pool_decay;
226372210662SUladzislau Rezki (Sony) decay_va_pool_node(vn, full_pool_decay);
2264282631cbSUladzislau Rezki (Sony)
2265282631cbSUladzislau Rezki (Sony) if (RB_EMPTY_ROOT(&vn->lazy.root))
2266282631cbSUladzislau Rezki (Sony) continue;
2267282631cbSUladzislau Rezki (Sony)
2268282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock);
2269282631cbSUladzislau Rezki (Sony) WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2270282631cbSUladzislau Rezki (Sony) list_replace_init(&vn->lazy.head, &vn->purge_list);
2271282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock);
2272282631cbSUladzislau Rezki (Sony)
2273282631cbSUladzislau Rezki (Sony) start = min(start, list_first_entry(&vn->purge_list,
2274282631cbSUladzislau Rezki (Sony) struct vmap_area, list)->va_start);
2275282631cbSUladzislau Rezki (Sony)
2276282631cbSUladzislau Rezki (Sony) end = max(end, list_last_entry(&vn->purge_list,
2277282631cbSUladzislau Rezki (Sony) struct vmap_area, list)->va_end);
2278282631cbSUladzislau Rezki (Sony)
2279282631cbSUladzislau Rezki (Sony) cpumask_set_cpu(i, &purge_nodes);
2280282631cbSUladzislau Rezki (Sony) }
2281282631cbSUladzislau Rezki (Sony)
228272210662SUladzislau Rezki (Sony) nr_purge_nodes = cpumask_weight(&purge_nodes);
228372210662SUladzislau Rezki (Sony) if (nr_purge_nodes > 0) {
2284282631cbSUladzislau Rezki (Sony) flush_tlb_kernel_range(start, end);
2285282631cbSUladzislau Rezki (Sony)
228672210662SUladzislau Rezki (Sony) /* One extra helper per full lazy_max_pages() set, minus one for this context. */
228772210662SUladzislau Rezki (Sony) nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
228872210662SUladzislau Rezki (Sony) nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
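/*
 * Worked example (illustrative): with vmap_lazy_nr holding three
 * full lazy_max_pages() sets and eight purge nodes,
 * clamp(3, 1, 8) - 1 == 2, so two helper workers are scheduled
 * and this context purges the remaining nodes itself in the
 * else-branch below.
 */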
228972210662SUladzislau Rezki (Sony)
2290282631cbSUladzislau Rezki (Sony) for_each_cpu(i, &purge_nodes) {
229172210662SUladzislau Rezki (Sony) vn = &vmap_nodes[i];
229272210662SUladzislau Rezki (Sony)
229372210662SUladzislau Rezki (Sony) if (nr_purge_helpers > 0) {
229472210662SUladzislau Rezki (Sony) INIT_WORK(&vn->purge_work, purge_vmap_node);
229572210662SUladzislau Rezki (Sony)
229672210662SUladzislau Rezki (Sony) if (cpumask_test_cpu(i, cpu_online_mask))
229772210662SUladzislau Rezki (Sony) schedule_work_on(i, &vn->purge_work);
229872210662SUladzislau Rezki (Sony) else
229972210662SUladzislau Rezki (Sony) schedule_work(&vn->purge_work);
230072210662SUladzislau Rezki (Sony)
230172210662SUladzislau Rezki (Sony) nr_purge_helpers--;
230272210662SUladzislau Rezki (Sony) } else {
230372210662SUladzislau Rezki (Sony) vn->purge_work.func = NULL;
230472210662SUladzislau Rezki (Sony) purge_vmap_node(&vn->purge_work);
230572210662SUladzislau Rezki (Sony) nr_purged_areas += vn->nr_purged;
2306282631cbSUladzislau Rezki (Sony) }
2307282631cbSUladzislau Rezki (Sony) }
2308282631cbSUladzislau Rezki (Sony)
230972210662SUladzislau Rezki (Sony) for_each_cpu(i, &purge_nodes) {
231072210662SUladzislau Rezki (Sony) vn = &vmap_nodes[i];
231172210662SUladzislau Rezki (Sony)
231272210662SUladzislau Rezki (Sony) if (vn->purge_work.func) {
231372210662SUladzislau Rezki (Sony) flush_work(&vn->purge_work);
231472210662SUladzislau Rezki (Sony) nr_purged_areas += vn->nr_purged;
231572210662SUladzislau Rezki (Sony) }
231672210662SUladzislau Rezki (Sony) }
231772210662SUladzislau Rezki (Sony) }
231872210662SUladzislau Rezki (Sony)
231972210662SUladzislau Rezki (Sony) trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
232072210662SUladzislau Rezki (Sony) return nr_purged_areas > 0;
2321db64fe02SNick Piggin }
2322db64fe02SNick Piggin
2323db64fe02SNick Piggin /*
232477e50af0SThomas Gleixner * Reclaim vmap areas by purging fragmented blocks and the per-node lazy lists.
2325db64fe02SNick Piggin */
232677e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void)
2328db64fe02SNick Piggin {
2329f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock);
23300574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus();
233172210662SUladzislau Rezki (Sony) __purge_vmap_area_lazy(ULONG_MAX, 0, true);
2332f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock);
2333db64fe02SNick Piggin }
2334db64fe02SNick Piggin
2335690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work)
2336690467c8SUladzislau Rezki (Sony) {
2337690467c8SUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock);
233872210662SUladzislau Rezki (Sony) __purge_vmap_area_lazy(ULONG_MAX, 0, false);
2339690467c8SUladzislau Rezki (Sony) mutex_unlock(&vmap_purge_lock);
2340690467c8SUladzislau Rezki (Sony) }
2341690467c8SUladzislau Rezki (Sony)
2342db64fe02SNick Piggin /*
2343edd89818SUladzislau Rezki (Sony) * Free a vmap area. The caller must ensure that the area has been
2344edd89818SUladzislau Rezki (Sony) * unmapped, unlinked and that flush_cache_vunmap() has been called
2345edd89818SUladzislau Rezki (Sony) * for the correct range beforehand.
2346db64fe02SNick Piggin */
234764141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va)
2348db64fe02SNick Piggin {
23498c4196feSUladzislau Rezki (Sony) unsigned long nr_lazy_max = lazy_max_pages();
23508c4196feSUladzislau Rezki (Sony) unsigned long va_start = va->va_start;
235172210662SUladzislau Rezki (Sony) unsigned int vn_id = decode_vn_id(va->flags);
235272210662SUladzislau Rezki (Sony) struct vmap_node *vn;
23534d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy;
235480c4bd7aSChris Wilson
2355edd89818SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!list_empty(&va->list)))
2356edd89818SUladzislau Rezki (Sony) return;
2357dd3b8353SUladzislau Rezki (Sony)
2358b44f71e3SZhangPeng nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
2359b44f71e3SZhangPeng &vmap_lazy_nr);
236080c4bd7aSChris Wilson
236196e2db45SUladzislau Rezki (Sony) /*
236272210662SUladzislau Rezki (Sony) * If it was requested by a certain node, return it to
236372210662SUladzislau Rezki (Sony) * that node's pool for later reuse.
236496e2db45SUladzislau Rezki (Sony) */
236572210662SUladzislau Rezki (Sony) vn = is_vn_id_valid(vn_id) ?
236672210662SUladzislau Rezki (Sony) id_to_node(vn_id):addr_to_node(va->va_start);
236772210662SUladzislau Rezki (Sony)
2368282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock);
236972210662SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2370282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock);
237180c4bd7aSChris Wilson
23728c4196feSUladzislau Rezki (Sony) trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
23738c4196feSUladzislau Rezki (Sony)
237496e2db45SUladzislau Rezki (Sony) /* After this point, we may free va at any time */
23758c4196feSUladzislau Rezki (Sony) if (unlikely(nr_lazy > nr_lazy_max))
2376690467c8SUladzislau Rezki (Sony) schedule_work(&drain_vmap_work);
2377db64fe02SNick Piggin }
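
/*
 * Threshold illustration (reusing the assumed 4 KiB page example
 * above): lazily freeing a 2 MiB area adds 512 pages to
 * vmap_lazy_nr; once the counter exceeds lazy_max_pages()
 * (e.g. 40960 pages), drain_vmap_work is scheduled to purge.
 */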
2378db64fe02SNick Piggin
2379b29acbdcSNick Piggin /*
2380b29acbdcSNick Piggin * Free and unmap a vmap area
2381b29acbdcSNick Piggin */
2382b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va)
2383b29acbdcSNick Piggin {
2384b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end);
23854ad0ae8cSNicholas Piggin vunmap_range_noflush(va->va_start, va->va_end);
23868e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static())
238782a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end);
238882a2e924SChintan Pandya
2389c8eef01eSChristoph Hellwig free_vmap_area_noflush(va);
2390b29acbdcSNick Piggin }
2391b29acbdcSNick Piggin
2392993d0b28SMatthew Wilcox (Oracle) struct vmap_area *find_vmap_area(unsigned long addr)
2393db64fe02SNick Piggin {
2394d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
2395db64fe02SNick Piggin struct vmap_area *va;
2396d0936029SUladzislau Rezki (Sony) int i, j;
2397db64fe02SNick Piggin
23984ed91fa9SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized))
23994ed91fa9SUladzislau Rezki (Sony) return NULL;
24004ed91fa9SUladzislau Rezki (Sony)
2401d0936029SUladzislau Rezki (Sony) /*
2402d0936029SUladzislau Rezki (Sony) * addr_to_node_id(addr) converts an address to the index of
2403d0936029SUladzislau Rezki (Sony) * the node where a VA is located. If the VA spans several zones
2404d0936029SUladzislau Rezki (Sony) * and the passed addr is not the same as va->va_start, which is
240515e02a39SUladzislau Rezki (Sony) * uncommon, we may need to scan extra nodes. For example:
2406d0936029SUladzislau Rezki (Sony) *
240715e02a39SUladzislau Rezki (Sony) * <----va---->
2408d0936029SUladzislau Rezki (Sony) * -|-----|-----|-----|-----|-
2409d0936029SUladzislau Rezki (Sony) * 1 2 0 1
2410d0936029SUladzislau Rezki (Sony) *
241115e02a39SUladzislau Rezki (Sony) * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If
241215e02a39SUladzislau Rezki (Sony) * the passed addr is within node 2 or 0, extra work is needed.
2413d0936029SUladzislau Rezki (Sony) */
2414d0936029SUladzislau Rezki (Sony) i = j = addr_to_node_id(addr);
2415d0936029SUladzislau Rezki (Sony) do {
2416d0936029SUladzislau Rezki (Sony) vn = &vmap_nodes[i];
2417db64fe02SNick Piggin
2418d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
2419d0936029SUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vn->busy.root);
2420d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
2421d0936029SUladzislau Rezki (Sony)
2422d0936029SUladzislau Rezki (Sony) if (va)
2423db64fe02SNick Piggin return va;
2424d0936029SUladzislau Rezki (Sony) } while ((i = (i + 1) % nr_vmap_nodes) != j);
2425d0936029SUladzislau Rezki (Sony)
2426d0936029SUladzislau Rezki (Sony) return NULL;
2427db64fe02SNick Piggin }
2428db64fe02SNick Piggin
2429edd89818SUladzislau Rezki (Sony) static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2430edd89818SUladzislau Rezki (Sony) {
2431d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
2432edd89818SUladzislau Rezki (Sony) struct vmap_area *va;
2433d0936029SUladzislau Rezki (Sony) int i, j;
2434edd89818SUladzislau Rezki (Sony)
243515e02a39SUladzislau Rezki (Sony) /*
243615e02a39SUladzislau Rezki (Sony) * See the comment in find_vmap_area() about the loop.
243715e02a39SUladzislau Rezki (Sony) */
2438d0936029SUladzislau Rezki (Sony) i = j = addr_to_node_id(addr);
2439d0936029SUladzislau Rezki (Sony) do {
2440d0936029SUladzislau Rezki (Sony) vn = &vmap_nodes[i];
2441d0936029SUladzislau Rezki (Sony)
2442d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
2443d0936029SUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vn->busy.root);
2444edd89818SUladzislau Rezki (Sony) if (va)
2445d0936029SUladzislau Rezki (Sony) unlink_va(va, &vn->busy.root);
2446d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
2447edd89818SUladzislau Rezki (Sony)
2448d0936029SUladzislau Rezki (Sony) if (va)
2449edd89818SUladzislau Rezki (Sony) return va;
2450d0936029SUladzislau Rezki (Sony) } while ((i = (i + 1) % nr_vmap_nodes) != j);
2451d0936029SUladzislau Rezki (Sony)
2452d0936029SUladzislau Rezki (Sony) return NULL;
2453edd89818SUladzislau Rezki (Sony) }
2454edd89818SUladzislau Rezki (Sony)
2455db64fe02SNick Piggin /*** Per cpu kva allocator ***/
2456db64fe02SNick Piggin
2457db64fe02SNick Piggin /*
2458db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is
2459db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU.
2460db64fe02SNick Piggin */
2461db64fe02SNick Piggin /*
2462db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2463db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END - VMALLOC_START). Since we can't,
2464db64fe02SNick Piggin * guess instead (we just need a rough idea).
2465db64fe02SNick Piggin */
2466db64fe02SNick Piggin #if BITS_PER_LONG == 32
2467db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024)
2468db64fe02SNick Piggin #else
2469db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024)
2470db64fe02SNick Piggin #endif
2471db64fe02SNick Piggin
2472db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
2473db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
2474db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
2475db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
2476db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
2477db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
2478f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \
2479f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
2480db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
2481f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2482db64fe02SNick Piggin
2483db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
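
/*
 * Sizing sketch under assumed parameters (64-bit, 4 KiB pages,
 * NR_CPUS == 64; all illustrative): VMALLOC_PAGES is
 * 128 GiB / 4 KiB == 32M pages, so 32M / 64 / 16 == 32768, which
 * is clamped by VMAP_BBMAP_BITS_MAX to 1024 bits. VMAP_BLOCK_SIZE
 * is then 1024 * 4 KiB == 4 MiB, matching the "4MB with 4K pages"
 * notes above, and VMAP_PURGE_THRESHOLD below becomes 256 bits.
 */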
2484db64fe02SNick Piggin
248577e50af0SThomas Gleixner /*
248677e50af0SThomas Gleixner * Purge threshold to prevent overeager purging of fragmented blocks for
248777e50af0SThomas Gleixner * regular operations: Purge if vb->free is less than 1/4 of the capacity.
248877e50af0SThomas Gleixner */
248977e50af0SThomas Gleixner #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
249077e50af0SThomas Gleixner
2491869176a0SBaoquan He #define VMAP_RAM 0x1 /* indicates a vm_map_ram area */
2492869176a0SBaoquan He #define VMAP_BLOCK 0x2 /* marks the vmap_block sub-type */
2493869176a0SBaoquan He #define VMAP_FLAGS_MASK 0x3
2494869176a0SBaoquan He
2495db64fe02SNick Piggin struct vmap_block_queue {
2496db64fe02SNick Piggin spinlock_t lock;
2497db64fe02SNick Piggin struct list_head free;
2498062eacf5SUladzislau Rezki (Sony)
2499062eacf5SUladzislau Rezki (Sony) /*
2500062eacf5SUladzislau Rezki (Sony) * An xarray requires extra memory to be allocated
2501062eacf5SUladzislau Rezki (Sony) * dynamically. If that becomes an issue, an rb-tree
2502062eacf5SUladzislau Rezki (Sony) * can be used instead.
2503062eacf5SUladzislau Rezki (Sony) */
2504062eacf5SUladzislau Rezki (Sony) struct xarray vmap_blocks;
2505db64fe02SNick Piggin };
2506db64fe02SNick Piggin
2507db64fe02SNick Piggin struct vmap_block {
2508db64fe02SNick Piggin spinlock_t lock;
2509db64fe02SNick Piggin struct vmap_area *va;
2510db64fe02SNick Piggin unsigned long free, dirty;
2511d76f9954SBaoquan He DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
25127d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */
2513db64fe02SNick Piggin struct list_head free_list;
2514db64fe02SNick Piggin struct rcu_head rcu_head;
251502b709dfSNick Piggin struct list_head purge;
25168c61291fSZhaoyang Huang unsigned int cpu;
2517db64fe02SNick Piggin };
2518db64fe02SNick Piggin
2519db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2520db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2521db64fe02SNick Piggin
2522db64fe02SNick Piggin /*
2523062eacf5SUladzislau Rezki (Sony) * In order to provide fast access to any "vmap_block" associated
2524062eacf5SUladzislau Rezki (Sony) * with a specific address, we use a hash.
2525062eacf5SUladzislau Rezki (Sony) *
2526062eacf5SUladzislau Rezki (Sony) * The per-cpu vmap_block_queue is used in two ways: it serializes
2527062eacf5SUladzislau Rezki (Sony) * access to the free block chains among CPUs (alloc path) and it
2528062eacf5SUladzislau Rezki (Sony) * also acts as a vmap_block hash (alloc/free paths). That is, we
2529062eacf5SUladzislau Rezki (Sony) * overload it, since we already have the per-cpu array which is
2530062eacf5SUladzislau Rezki (Sony) * used as a hash table. When used as a hash, the 'cpu' passed to
2531062eacf5SUladzislau Rezki (Sony) * per_cpu() is not actually a CPU but rather a hash index.
2532062eacf5SUladzislau Rezki (Sony) *
2533fa1c77c1SUladzislau Rezki (Sony) * The hash function is addr_to_vb_xa(), which hashes any address
2534062eacf5SUladzislau Rezki (Sony) * to the specific hash index it belongs to. The per_cpu() macro is
2535062eacf5SUladzislau Rezki (Sony) * then used to access the array with the generated index.
2536062eacf5SUladzislau Rezki (Sony) *
2537062eacf5SUladzislau Rezki (Sony) * An example:
2538062eacf5SUladzislau Rezki (Sony) *
2539062eacf5SUladzislau Rezki (Sony) * CPU_1 CPU_2 CPU_0
2540062eacf5SUladzislau Rezki (Sony) * | | |
2541062eacf5SUladzislau Rezki (Sony) * V V V
2542062eacf5SUladzislau Rezki (Sony) * 0 10 20 30 40 50 60
2543062eacf5SUladzislau Rezki (Sony) * |------|------|------|------|------|------|...<vmap address space>
2544062eacf5SUladzislau Rezki (Sony) * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2
2545062eacf5SUladzislau Rezki (Sony) *
2546062eacf5SUladzislau Rezki (Sony) * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to the CPU0 zone, thus
2547062eacf5SUladzislau Rezki (Sony) * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2548062eacf5SUladzislau Rezki (Sony) *
2549062eacf5SUladzislau Rezki (Sony) * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to the CPU1 zone, thus
2550062eacf5SUladzislau Rezki (Sony) * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2551062eacf5SUladzislau Rezki (Sony) *
2552062eacf5SUladzislau Rezki (Sony) * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to the CPU2 zone, thus
2553062eacf5SUladzislau Rezki (Sony) * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2554062eacf5SUladzislau Rezki (Sony) *
2555062eacf5SUladzislau Rezki (Sony) * This technique almost always avoids lock contention on insert/remove,
2556062eacf5SUladzislau Rezki (Sony) * however xarray spinlocks protect against any contention that remains.
2557db64fe02SNick Piggin */
2558062eacf5SUladzislau Rezki (Sony) static struct xarray *
2559fa1c77c1SUladzislau Rezki (Sony) addr_to_vb_xa(unsigned long addr)
2560062eacf5SUladzislau Rezki (Sony) {
2561a34acf30SUladzislau Rezki (Sony) int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
2562a34acf30SUladzislau Rezki (Sony)
2563a34acf30SUladzislau Rezki (Sony) /*
2564a34acf30SUladzislau Rezki (Sony) * Please note, nr_cpu_ids is one past the highest possible
2565a34acf30SUladzislau Rezki (Sony) * CPU bit, i.e. we never invoke cpumask_next() for an index
2566a34acf30SUladzislau Rezki (Sony) * equal to nr_cpu_ids - 1, since that CPU is always possible.
2567a34acf30SUladzislau Rezki (Sony) */
2568a34acf30SUladzislau Rezki (Sony) if (!cpu_possible(index))
2569a34acf30SUladzislau Rezki (Sony) index = cpumask_next(index, cpu_possible_mask);
2570062eacf5SUladzislau Rezki (Sony)
2571062eacf5SUladzislau Rezki (Sony) return &per_cpu(vmap_block_queue, index).vmap_blocks;
2572062eacf5SUladzislau Rezki (Sony) }
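
/*
 * Hash walk-through (illustrative numbers): with VMAP_BLOCK_SIZE
 * == 4 MiB and nr_cpu_ids == 4, an address 26 MiB above zero gives
 *
 *   index = (26 MiB / 4 MiB) % 4 == 6 % 4 == 2
 *
 * so the lookup goes to per_cpu(vmap_block_queue, 2).vmap_blocks,
 * no matter which CPU performs it. If CPU 2 were not possible,
 * cpumask_next() would advance to the next possible one.
 */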
2573db64fe02SNick Piggin
2574db64fe02SNick Piggin /*
2575db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory
2576db64fe02SNick Piggin * out of partially filled vmap blocks. However, vmap block sizing should be
2577db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a
2578db64fe02SNick Piggin * big problem.
2579db64fe02SNick Piggin */
2580db64fe02SNick Piggin
2581db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr)
2582db64fe02SNick Piggin {
2583db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2584db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE;
2585db64fe02SNick Piggin return addr;
2586db64fe02SNick Piggin }
2587db64fe02SNick Piggin
2588cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2589cf725ce2SRoman Pen {
2590cf725ce2SRoman Pen unsigned long addr;
2591cf725ce2SRoman Pen
2592cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT);
2593cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2594cf725ce2SRoman Pen return (void *)addr;
2595cf725ce2SRoman Pen }
2596cf725ce2SRoman Pen
2597cf725ce2SRoman Pen /**
2598cf725ce2SRoman Pen * new_vmap_block - allocates a new vmap_block and occupies 2^order pages in
2599cf725ce2SRoman Pen * this block. Of course the number of pages can't exceed VMAP_BBMAP_BITS.
2600cf725ce2SRoman Pen * @order: allocation order; 2^order pages are occupied in the newly allocated block
2601cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator
2602cf725ce2SRoman Pen *
2603a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2604cf725ce2SRoman Pen */
2605cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2606db64fe02SNick Piggin {
2607db64fe02SNick Piggin struct vmap_block_queue *vbq;
2608db64fe02SNick Piggin struct vmap_block *vb;
2609db64fe02SNick Piggin struct vmap_area *va;
2610062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
2611db64fe02SNick Piggin unsigned long vb_idx;
2612db64fe02SNick Piggin int node, err;
2613cf725ce2SRoman Pen void *vaddr;
2614db64fe02SNick Piggin
2615db64fe02SNick Piggin node = numa_node_id();
2616db64fe02SNick Piggin
2617db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block),
2618db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node);
2619db64fe02SNick Piggin if (unlikely(!vb))
2620db64fe02SNick Piggin return ERR_PTR(-ENOMEM);
2621db64fe02SNick Piggin
2622db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2623db64fe02SNick Piggin VMALLOC_START, VMALLOC_END,
2624869176a0SBaoquan He node, gfp_mask,
26254b68a773SBaoquan He VMAP_RAM|VMAP_BLOCK, NULL);
2626ddf9c6d4STobias Klauser if (IS_ERR(va)) {
2627db64fe02SNick Piggin kfree(vb);
2628e7d86340SJulia Lawall return ERR_CAST(va);
2629db64fe02SNick Piggin }
2630db64fe02SNick Piggin
2631cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0);
2632db64fe02SNick Piggin spin_lock_init(&vb->lock);
2633db64fe02SNick Piggin vb->va = va;
2634cf725ce2SRoman Pen /* At least something should be left free */
2635cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2636d76f9954SBaoquan He bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2637cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order);
2638db64fe02SNick Piggin vb->dirty = 0;
26397d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS;
26407d61bfe8SRoman Pen vb->dirty_max = 0;
2641d76f9954SBaoquan He bitmap_set(vb->used_map, 0, (1UL << order));
2642db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list);
26433e3de794SWill Deacon vb->cpu = raw_smp_processor_id();
2644db64fe02SNick Piggin
2645fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(va->va_start);
2646db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start);
2647062eacf5SUladzislau Rezki (Sony) err = xa_insert(xa, vb_idx, vb, gfp_mask);
26480f14599cSMatthew Wilcox (Oracle) if (err) {
26490f14599cSMatthew Wilcox (Oracle) kfree(vb);
26500f14599cSMatthew Wilcox (Oracle) free_vmap_area(va);
26510f14599cSMatthew Wilcox (Oracle) return ERR_PTR(err);
26520f14599cSMatthew Wilcox (Oracle) }
26538c61291fSZhaoyang Huang /*
26548c61291fSZhaoyang Huang * list_add_tail_rcu could happen on another core
26558c61291fSZhaoyang Huang * rather than vb->cpu due to task migration, which
26568c61291fSZhaoyang Huang * is safe as list_add_tail_rcu will ensure the list's
26578c61291fSZhaoyang Huang * integrity together with list_for_each_entry_rcu on
26588c61291fSZhaoyang Huang * the read side.
26598c61291fSZhaoyang Huang */
26608c61291fSZhaoyang Huang vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
2661db64fe02SNick Piggin spin_lock(&vbq->lock);
266268ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free);
2663db64fe02SNick Piggin spin_unlock(&vbq->lock);
2664db64fe02SNick Piggin
2665cf725ce2SRoman Pen return vaddr;
2666db64fe02SNick Piggin }
2667db64fe02SNick Piggin
2668db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb)
2669db64fe02SNick Piggin {
2670d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
2671db64fe02SNick Piggin struct vmap_block *tmp;
2672062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
2673db64fe02SNick Piggin
2674fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(vb->va->va_start);
2675062eacf5SUladzislau Rezki (Sony) tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2676db64fe02SNick Piggin BUG_ON(tmp != vb);
2677db64fe02SNick Piggin
2678d0936029SUladzislau Rezki (Sony) vn = addr_to_node(vb->va->va_start);
2679d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
2680d0936029SUladzislau Rezki (Sony) unlink_va(vb->va, &vn->busy.root);
2681d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
2682edd89818SUladzislau Rezki (Sony)
268364141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va);
268422a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head);
2685db64fe02SNick Piggin }
2686db64fe02SNick Piggin
2687ca5e46c3SThomas Gleixner static bool purge_fragmented_block(struct vmap_block *vb,
26888c61291fSZhaoyang Huang struct list_head *purge_list, bool force_purge)
268902b709dfSNick Piggin {
26908c61291fSZhaoyang Huang struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
26918c61291fSZhaoyang Huang
2692ca5e46c3SThomas Gleixner if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2693ca5e46c3SThomas Gleixner vb->dirty == VMAP_BBMAP_BITS)
2694ca5e46c3SThomas Gleixner return false;
269502b709dfSNick Piggin
269677e50af0SThomas Gleixner /* Don't overeagerly purge usable blocks unless requested */
269777e50af0SThomas Gleixner if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
269877e50af0SThomas Gleixner return false;
269977e50af0SThomas Gleixner
2700ca5e46c3SThomas Gleixner /* prevent further allocs after releasing lock */
27017f48121eSThomas Gleixner WRITE_ONCE(vb->free, 0);
2702ca5e46c3SThomas Gleixner /* prevent purging it again */
27037f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
27047d61bfe8SRoman Pen vb->dirty_min = 0;
27057d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS;
270602b709dfSNick Piggin spin_lock(&vbq->lock);
270702b709dfSNick Piggin list_del_rcu(&vb->free_list);
270802b709dfSNick Piggin spin_unlock(&vbq->lock);
2709ca5e46c3SThomas Gleixner list_add_tail(&vb->purge, purge_list);
2710ca5e46c3SThomas Gleixner return true;
271102b709dfSNick Piggin }
271202b709dfSNick Piggin
2713ca5e46c3SThomas Gleixner static void free_purged_blocks(struct list_head *purge_list)
2714ca5e46c3SThomas Gleixner {
2715ca5e46c3SThomas Gleixner struct vmap_block *vb, *n_vb;
2716ca5e46c3SThomas Gleixner
2717ca5e46c3SThomas Gleixner list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
271802b709dfSNick Piggin list_del(&vb->purge);
271902b709dfSNick Piggin free_vmap_block(vb);
272002b709dfSNick Piggin }
272102b709dfSNick Piggin }
272202b709dfSNick Piggin
2723ca5e46c3SThomas Gleixner static void purge_fragmented_blocks(int cpu)
2724ca5e46c3SThomas Gleixner {
2725ca5e46c3SThomas Gleixner LIST_HEAD(purge);
2726ca5e46c3SThomas Gleixner struct vmap_block *vb;
2727ca5e46c3SThomas Gleixner struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2728ca5e46c3SThomas Gleixner
2729ca5e46c3SThomas Gleixner rcu_read_lock();
2730ca5e46c3SThomas Gleixner list_for_each_entry_rcu(vb, &vbq->free, free_list) {
27317f48121eSThomas Gleixner unsigned long free = READ_ONCE(vb->free);
27327f48121eSThomas Gleixner unsigned long dirty = READ_ONCE(vb->dirty);
27337f48121eSThomas Gleixner
27347f48121eSThomas Gleixner if (free + dirty != VMAP_BBMAP_BITS ||
27357f48121eSThomas Gleixner dirty == VMAP_BBMAP_BITS)
2736ca5e46c3SThomas Gleixner continue;
2737ca5e46c3SThomas Gleixner
2738ca5e46c3SThomas Gleixner spin_lock(&vb->lock);
27398c61291fSZhaoyang Huang purge_fragmented_block(vb, &purge, true);
2740ca5e46c3SThomas Gleixner spin_unlock(&vb->lock);
2741ca5e46c3SThomas Gleixner }
2742ca5e46c3SThomas Gleixner rcu_read_unlock();
2743ca5e46c3SThomas Gleixner free_purged_blocks(&purge);
2744ca5e46c3SThomas Gleixner }
2745ca5e46c3SThomas Gleixner
274602b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void)
274702b709dfSNick Piggin {
274802b709dfSNick Piggin int cpu;
274902b709dfSNick Piggin
275002b709dfSNick Piggin for_each_possible_cpu(cpu)
275102b709dfSNick Piggin purge_fragmented_blocks(cpu);
275202b709dfSNick Piggin }
275302b709dfSNick Piggin
2754db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2755db64fe02SNick Piggin {
2756db64fe02SNick Piggin struct vmap_block_queue *vbq;
2757db64fe02SNick Piggin struct vmap_block *vb;
2758cf725ce2SRoman Pen void *vaddr = NULL;
2759db64fe02SNick Piggin unsigned int order;
2760db64fe02SNick Piggin
2761891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size));
2762db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2763aa91c4d8SJan Kara if (WARN_ON(size == 0)) {
2764aa91c4d8SJan Kara /*
2765aa91c4d8SJan Kara * Allocating 0 bytes isn't what the caller wants since
2766aa91c4d8SJan Kara * get_order(0) returns a nonsensical result. Just warn and
2767aa91c4d8SJan Kara * bail out early.
2768aa91c4d8SJan Kara */
2769ac0476e8SHailong.Liu return ERR_PTR(-EINVAL);
2770aa91c4d8SJan Kara }
2771db64fe02SNick Piggin order = get_order(size);
2772db64fe02SNick Piggin
2773db64fe02SNick Piggin rcu_read_lock();
27743f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue);
2775db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2776cf725ce2SRoman Pen unsigned long pages_off;
2777db64fe02SNick Piggin
277843d76502SThomas Gleixner if (READ_ONCE(vb->free) < (1UL << order))
277943d76502SThomas Gleixner continue;
278043d76502SThomas Gleixner
2781db64fe02SNick Piggin spin_lock(&vb->lock);
2782cf725ce2SRoman Pen if (vb->free < (1UL << order)) {
2783cf725ce2SRoman Pen spin_unlock(&vb->lock);
2784cf725ce2SRoman Pen continue;
2785cf725ce2SRoman Pen }
278602b709dfSNick Piggin
2787cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free;
2788cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
278943d76502SThomas Gleixner WRITE_ONCE(vb->free, vb->free - (1UL << order));
2790d76f9954SBaoquan He bitmap_set(vb->used_map, pages_off, (1UL << order));
2791db64fe02SNick Piggin if (vb->free == 0) {
2792db64fe02SNick Piggin spin_lock(&vbq->lock);
2793de560423SNick Piggin list_del_rcu(&vb->free_list);
2794db64fe02SNick Piggin spin_unlock(&vbq->lock);
2795db64fe02SNick Piggin }
2796cf725ce2SRoman Pen
2797db64fe02SNick Piggin spin_unlock(&vb->lock);
2798db64fe02SNick Piggin break;
2799db64fe02SNick Piggin }
280002b709dfSNick Piggin
2801db64fe02SNick Piggin rcu_read_unlock();
2802db64fe02SNick Piggin
2803cf725ce2SRoman Pen /* Allocate a new block if nothing was found */
2804cf725ce2SRoman Pen if (!vaddr)
2805cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask);
2806db64fe02SNick Piggin
2807cf725ce2SRoman Pen return vaddr;
2808db64fe02SNick Piggin }
2809db64fe02SNick Piggin
281078a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size)
2811db64fe02SNick Piggin {
2812db64fe02SNick Piggin unsigned long offset;
2813db64fe02SNick Piggin unsigned int order;
2814db64fe02SNick Piggin struct vmap_block *vb;
2815062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
2816db64fe02SNick Piggin
2817891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size));
2818db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2819b29acbdcSNick Piggin
282078a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size);
2821b29acbdcSNick Piggin
2822db64fe02SNick Piggin order = get_order(size);
282378a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2824062eacf5SUladzislau Rezki (Sony)
2825fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(addr);
2826062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx(addr));
2827062eacf5SUladzislau Rezki (Sony)
2828d76f9954SBaoquan He spin_lock(&vb->lock);
2829d76f9954SBaoquan He bitmap_clear(vb->used_map, offset, (1UL << order));
2830d76f9954SBaoquan He spin_unlock(&vb->lock);
2831db64fe02SNick Piggin
28324ad0ae8cSNicholas Piggin vunmap_range_noflush(addr, addr + size);
283364141da5SJeremy Fitzhardinge
28348e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static())
283578a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size);
283682a2e924SChintan Pandya
2837db64fe02SNick Piggin spin_lock(&vb->lock);
28387d61bfe8SRoman Pen
2839a09fad96SThomas Gleixner /* Expand the not yet TLB flushed dirty range */
28407d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset);
28417d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2842d086817dSMinChan Kim
28437f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2844db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) {
2845de560423SNick Piggin BUG_ON(vb->free);
2846db64fe02SNick Piggin spin_unlock(&vb->lock);
2847db64fe02SNick Piggin free_vmap_block(vb);
2848db64fe02SNick Piggin } else
2849db64fe02SNick Piggin spin_unlock(&vb->lock);
2850db64fe02SNick Piggin }
2851db64fe02SNick Piggin
2852868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2853db64fe02SNick Piggin {
2854ca5e46c3SThomas Gleixner LIST_HEAD(purge_list);
2855db64fe02SNick Piggin int cpu;
2856db64fe02SNick Piggin
28579b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized))
28589b463334SJeremy Fitzhardinge return;
28599b463334SJeremy Fitzhardinge
2860ca5e46c3SThomas Gleixner mutex_lock(&vmap_purge_lock);
28615803ed29SChristoph Hellwig
2862db64fe02SNick Piggin for_each_possible_cpu(cpu) {
2863db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2864db64fe02SNick Piggin struct vmap_block *vb;
2865fc1e0d98SThomas Gleixner unsigned long idx;
2866db64fe02SNick Piggin
2867db64fe02SNick Piggin rcu_read_lock();
2868fc1e0d98SThomas Gleixner xa_for_each(&vbq->vmap_blocks, idx, vb) {
2869db64fe02SNick Piggin spin_lock(&vb->lock);
2870ca5e46c3SThomas Gleixner
2871ca5e46c3SThomas Gleixner /*
2872ca5e46c3SThomas Gleixner * Try to purge a fragmented block first. If it's
2873ca5e46c3SThomas Gleixner * not purgeable, check whether there is dirty
2874ca5e46c3SThomas Gleixner * space to be flushed.
2875ca5e46c3SThomas Gleixner */
28768c61291fSZhaoyang Huang if (!purge_fragmented_block(vb, &purge_list, false) &&
2877a09fad96SThomas Gleixner vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
28787d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start;
2879db64fe02SNick Piggin unsigned long s, e;
2880b136be5eSJoonsoo Kim
28817d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT);
28827d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT);
2883db64fe02SNick Piggin
28847d61bfe8SRoman Pen start = min(s, start);
28857d61bfe8SRoman Pen end = max(e, end);
28867d61bfe8SRoman Pen
2887a09fad96SThomas Gleixner /* Prevent that this is flushed again */
2888a09fad96SThomas Gleixner vb->dirty_min = VMAP_BBMAP_BITS;
2889a09fad96SThomas Gleixner vb->dirty_max = 0;
2890a09fad96SThomas Gleixner
2891db64fe02SNick Piggin flush = 1;
2892db64fe02SNick Piggin }
2893db64fe02SNick Piggin spin_unlock(&vb->lock);
2894db64fe02SNick Piggin }
2895db64fe02SNick Piggin rcu_read_unlock();
2896db64fe02SNick Piggin }
2897ca5e46c3SThomas Gleixner free_purged_blocks(&purge_list);
2898db64fe02SNick Piggin
289972210662SUladzislau Rezki (Sony) if (!__purge_vmap_area_lazy(start, end, false) && flush)
29000574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end);
2901f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock);
2902db64fe02SNick Piggin }
2903868b104dSRick Edgecombe
2904868b104dSRick Edgecombe /**
2905868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2906868b104dSRick Edgecombe *
2907868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2908868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you
2909868b104dSRick Edgecombe * have now may, in a former life, have been mapped into the kernel virtual
2910868b104dSRick Edgecombe * address space by the vmap layer, so there might be some CPUs with TLB entries
2911868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping).
2912868b104dSRick Edgecombe *
2913868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2914868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases
2915868b104dSRick Edgecombe * from the vmap layer.
2916868b104dSRick Edgecombe */
2917868b104dSRick Edgecombe void vm_unmap_aliases(void)
2918868b104dSRick Edgecombe {
2919868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0;
2920868b104dSRick Edgecombe int flush = 0;
2921868b104dSRick Edgecombe
2922868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush);
2923868b104dSRick Edgecombe }
2924db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases);
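
/*
 * Illustrative sketch, not part of this file: a caller that is about
 * to change the protection of pages it controls can first make sure
 * that no lazily kept vmap alias of them survives:
 *
 *	vm_unmap_aliases();
 *	// now only the caller's own mappings can reach those pages
 */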
2925db64fe02SNick Piggin
2926db64fe02SNick Piggin /**
2927db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2928db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram
2929db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2930db64fe02SNick Piggin */
2931db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count)
2932db64fe02SNick Piggin {
293365ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT;
29344aff1dc4SAndrey Konovalov unsigned long addr = (unsigned long)kasan_reset_tag(mem);
29359c3acf60SChristoph Hellwig struct vmap_area *va;
2936db64fe02SNick Piggin
29375803ed29SChristoph Hellwig might_sleep();
2938db64fe02SNick Piggin BUG_ON(!addr);
2939db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START);
2940db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END);
2941a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr));
2942db64fe02SNick Piggin
2943d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size);
2944d98c9e83SAndrey Ryabinin
29459c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) {
294605e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size);
294778a0e8c4SChristoph Hellwig vb_free(addr, size);
29489c3acf60SChristoph Hellwig return;
29499c3acf60SChristoph Hellwig }
29509c3acf60SChristoph Hellwig
2951edd89818SUladzislau Rezki (Sony) va = find_unlink_vmap_area(addr);
295214687619SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va))
295314687619SUladzislau Rezki (Sony) return;
295414687619SUladzislau Rezki (Sony)
2955b44f71e3SZhangPeng debug_check_no_locks_freed((void *)va->va_start, va_size(va));
29569c3acf60SChristoph Hellwig free_unmap_vmap_area(va);
2957db64fe02SNick Piggin }
2958db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram);
2959db64fe02SNick Piggin
2960db64fe02SNick Piggin /**
2961db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2962db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped
2963db64fe02SNick Piggin * @count: number of pages
2964db64fe02SNick Piggin * @node: prefer to allocate data structures on this node
2965e99c97adSRandy Dunlap *
296636437638SGioh Kim * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
296736437638SGioh Kim * faster than vmap(). But if you mix long-lived and short-lived objects
296836437638SGioh Kim * with vm_map_ram(), it can consume lots of address space through
296936437638SGioh Kim * fragmentation (especially on a 32bit machine), and you may eventually
297036437638SGioh Kim * see mapping failures. Please use this function only for short-lived objects.
297136437638SGioh Kim *
2972e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure
2973db64fe02SNick Piggin */
2974d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node)
2975db64fe02SNick Piggin {
297665ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT;
2977db64fe02SNick Piggin unsigned long addr;
2978db64fe02SNick Piggin void *mem;
2979db64fe02SNick Piggin
2980db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) {
2981db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL);
2982db64fe02SNick Piggin if (IS_ERR(mem))
2983db64fe02SNick Piggin return NULL;
2984db64fe02SNick Piggin addr = (unsigned long)mem;
2985db64fe02SNick Piggin } else {
2986db64fe02SNick Piggin struct vmap_area *va;
2987db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE,
2988869176a0SBaoquan He VMALLOC_START, VMALLOC_END,
2989aaab830aSrulinhuang node, GFP_KERNEL, VMAP_RAM,
29904b68a773SBaoquan He NULL);
2991db64fe02SNick Piggin if (IS_ERR(va))
2992db64fe02SNick Piggin return NULL;
2993db64fe02SNick Piggin
2994db64fe02SNick Piggin addr = va->va_start;
2995db64fe02SNick Piggin mem = (void *)addr;
2996db64fe02SNick Piggin }
2997d98c9e83SAndrey Ryabinin
2998b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2999b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) {
3000db64fe02SNick Piggin vm_unmap_ram(mem, count);
3001db64fe02SNick Piggin return NULL;
3002db64fe02SNick Piggin }
3003b67177ecSNicholas Piggin
300423689e91SAndrey Konovalov /*
300523689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped.
300623689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for
300723689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
300823689e91SAndrey Konovalov */
3009f6e39794SAndrey Konovalov mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
301019f1c3acSAndrey Konovalov
3011db64fe02SNick Piggin return mem;
3012db64fe02SNick Piggin }
3013db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram);
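
/*
 * Illustrative pairing of vm_map_ram()/vm_unmap_ram() above; "pages",
 * "nr" and "src" are assumptions made up for the sketch. Small,
 * short-lived mappings like this hit the per-cpu vmap block fast path:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 *
 * Note that vm_unmap_ram() must be given the same count that was
 * passed to vm_map_ram(); partial unmaps are not supported.
 */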
3014db64fe02SNick Piggin
30154341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata;
301692eac168SMike Rapoport
3017121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm)
3018121e6f32SNicholas Piggin {
3019121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3020121e6f32SNicholas Piggin return vm->page_order;
3021121e6f32SNicholas Piggin #else
3022121e6f32SNicholas Piggin return 0;
3023121e6f32SNicholas Piggin #endif
3024121e6f32SNicholas Piggin }
3025121e6f32SNicholas Piggin
30262e45474aSMike Rapoport (Microsoft) unsigned int get_vm_area_page_order(struct vm_struct *vm)
30272e45474aSMike Rapoport (Microsoft) {
30282e45474aSMike Rapoport (Microsoft) return vm_area_page_order(vm);
30292e45474aSMike Rapoport (Microsoft) }
30302e45474aSMike Rapoport (Microsoft)
3031121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
3032121e6f32SNicholas Piggin {
3033121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3034121e6f32SNicholas Piggin vm->page_order = order;
3035121e6f32SNicholas Piggin #else
3036121e6f32SNicholas Piggin BUG_ON(order != 0);
3037121e6f32SNicholas Piggin #endif
3038121e6f32SNicholas Piggin }
3039121e6f32SNicholas Piggin
3040f0aa6617STejun Heo /**
3041be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot
3042be9b7335SNicolas Pitre * @vm: vm_struct to add
3043be9b7335SNicolas Pitre *
3044be9b7335SNicolas Pitre * This function is used to add a fixed kernel vm area to vmlist before
3045be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
3046be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero.
3047be9b7335SNicolas Pitre *
3048be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3049be9b7335SNicolas Pitre */
3050be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm)
3051be9b7335SNicolas Pitre {
3052be9b7335SNicolas Pitre struct vm_struct *tmp, **p;
3053be9b7335SNicolas Pitre
3054be9b7335SNicolas Pitre BUG_ON(vmap_initialized);
3055be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
3056be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) {
3057be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size);
3058be9b7335SNicolas Pitre break;
3059be9b7335SNicolas Pitre } else
3060be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr);
3061be9b7335SNicolas Pitre }
3062be9b7335SNicolas Pitre vm->next = *p;
3063be9b7335SNicolas Pitre *p = vm;
3064be9b7335SNicolas Pitre }
3065be9b7335SNicolas Pitre
3066be9b7335SNicolas Pitre /**
3067f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot
3068f0aa6617STejun Heo * @vm: vm_struct to register
3069c0c0a293STejun Heo * @align: requested alignment
3070f0aa6617STejun Heo *
3071f0aa6617STejun Heo * This function is used to register kernel vm area before
3072f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain
3073f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return,
3074f0aa6617STejun Heo * vm->addr contains the allocated address.
3075f0aa6617STejun Heo *
3076f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3077f0aa6617STejun Heo */
3078c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3079f0aa6617STejun Heo {
30800eb68437SKefeng Wang unsigned long addr = ALIGN(VMALLOC_START, align);
30810eb68437SKefeng Wang struct vm_struct *cur, **p;
3082f0aa6617STejun Heo
30830eb68437SKefeng Wang BUG_ON(vmap_initialized);
3084c0c0a293STejun Heo
30850eb68437SKefeng Wang for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
30860eb68437SKefeng Wang if ((unsigned long)cur->addr - addr >= vm->size)
30870eb68437SKefeng Wang break;
30880eb68437SKefeng Wang addr = ALIGN((unsigned long)cur->addr + cur->size, align);
30890eb68437SKefeng Wang }
30900eb68437SKefeng Wang
30910eb68437SKefeng Wang BUG_ON(addr > VMALLOC_END - vm->size);
3092c0c0a293STejun Heo vm->addr = (void *)addr;
30930eb68437SKefeng Wang vm->next = *p;
30940eb68437SKefeng Wang *p = vm;
30953252b1d8SKefeng Wang kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3096f0aa6617STejun Heo }
3097f0aa6617STejun Heo
309820fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm)
3099f5252e00SMitsuo Hayasaka {
3100d4033afdSJoonsoo Kim /*
310120fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED,
3102d4033afdSJoonsoo Kim * we should make sure that vm has proper values.
3103d4033afdSJoonsoo Kim * Pairs with smp_rmb() in show_numa_info().
3104d4033afdSJoonsoo Kim */
3105d4033afdSJoonsoo Kim smp_wmb();
310620fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED;
3107cf88c790STejun Heo }
3108cf88c790STejun Heo
31090f9b6856SSuren Baghdasaryan struct vm_struct *__get_vm_area_node(unsigned long size,
31107ca3027bSDaniel Axtens unsigned long align, unsigned long shift, unsigned long flags,
31117ca3027bSDaniel Axtens unsigned long start, unsigned long end, int node,
31127ca3027bSDaniel Axtens gfp_t gfp_mask, const void *caller)
3113db64fe02SNick Piggin {
31140006526dSKautuk Consul struct vmap_area *va;
3115db64fe02SNick Piggin struct vm_struct *area;
3116d98c9e83SAndrey Ryabinin unsigned long requested_size = size;
31171da177e4SLinus Torvalds
311852fd24caSGiridhar Pemmasani BUG_ON(in_interrupt());
31197ca3027bSDaniel Axtens size = ALIGN(size, 1ul << shift);
312031be8309SOGAWA Hirofumi if (unlikely(!size))
312131be8309SOGAWA Hirofumi return NULL;
31221da177e4SLinus Torvalds
3123252e5c6eSzijun_hu if (flags & VM_IOREMAP)
3124252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size),
3125252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER);
3126252e5c6eSzijun_hu
3127cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
31281da177e4SLinus Torvalds if (unlikely(!area))
31291da177e4SLinus Torvalds return NULL;
31301da177e4SLinus Torvalds
313171394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD))
31321da177e4SLinus Torvalds size += PAGE_SIZE;
31331da177e4SLinus Torvalds
31344b68a773SBaoquan He area->flags = flags;
31354b68a773SBaoquan He area->caller = caller;
3136a0309fafSKees Cook area->requested_size = requested_size;
31374b68a773SBaoquan He
31384b68a773SBaoquan He va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
3139db64fe02SNick Piggin if (IS_ERR(va)) {
3140db64fe02SNick Piggin kfree(area);
3141db64fe02SNick Piggin return NULL;
31421da177e4SLinus Torvalds }
31431da177e4SLinus Torvalds
314419f1c3acSAndrey Konovalov /*
314519f1c3acSAndrey Konovalov * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
314619f1c3acSAndrey Konovalov * best-effort approach, as they can be mapped outside of vmalloc code.
314719f1c3acSAndrey Konovalov * For VM_ALLOC mappings, the pages are marked as accessible after
314819f1c3acSAndrey Konovalov * getting mapped in __vmalloc_node_range().
314923689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for
315023689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
315119f1c3acSAndrey Konovalov */
315219f1c3acSAndrey Konovalov if (!(flags & VM_ALLOC))
315323689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3154f6e39794SAndrey Konovalov KASAN_VMALLOC_PROT_NORMAL);
31551d96320fSAndrey Konovalov
31561da177e4SLinus Torvalds return area;
31571da177e4SLinus Torvalds }
31581da177e4SLinus Torvalds
3159c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3160c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end,
31615e6cafc8SMarek Szyprowski const void *caller)
3162c2968612SBenjamin Herrenschmidt {
31637ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
31647ca3027bSDaniel Axtens NUMA_NO_NODE, GFP_KERNEL, caller);
3165c2968612SBenjamin Herrenschmidt }
3166c2968612SBenjamin Herrenschmidt
31671da177e4SLinus Torvalds /**
3168183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area
31691da177e4SLinus Torvalds * @size: size of the area
31701da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC
31711da177e4SLinus Torvalds *
31721da177e4SLinus Torvalds * Search for an area of @size in the kernel virtual mapping area,
31731da177e4SLinus Torvalds * and reserve it for our purposes.
3175a862f68aSMike Rapoport *
3176a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure.
31771da177e4SLinus Torvalds */
31781da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
31791da177e4SLinus Torvalds {
31807ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
31817ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END,
318200ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL,
318300ef2d2fSDavid Rientjes __builtin_return_address(0));
318423016969SChristoph Lameter }
318523016969SChristoph Lameter
318623016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
31875e6cafc8SMarek Szyprowski const void *caller)
318823016969SChristoph Lameter {
31897ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
31907ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END,
319100ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller);
31921da177e4SLinus Torvalds }
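
/*
 * Sketch of reserving address space without backing it (the size is an
 * assumption made up for the example):
 *
 *	struct vm_struct *area = get_vm_area(SZ_1M, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	// area->addr is reserved (plus a guard page) but not yet mapped
 *	free_vm_area(area);
 */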
31931da177e4SLinus Torvalds
3194e9da6e99SMarek Szyprowski /**
3195e9da6e99SMarek Szyprowski * find_vm_area - find a continuous kernel virtual area
3196e9da6e99SMarek Szyprowski * @addr: base address
3197e9da6e99SMarek Szyprowski *
3198e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it.
3199e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned
3200e9da6e99SMarek Szyprowski * pointer valid.
3201a862f68aSMike Rapoport *
320274640617SHui Su * Return: the area descriptor on success or %NULL on failure.
3203e9da6e99SMarek Szyprowski */
3204e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
320583342314SNick Piggin {
3206db64fe02SNick Piggin struct vmap_area *va;
320783342314SNick Piggin
3208db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
3209688fcbfcSPengfei Li if (!va)
32107856dfebSAndi Kleen return NULL;
3211688fcbfcSPengfei Li
3212688fcbfcSPengfei Li return va->vm;
32137856dfebSAndi Kleen }
32147856dfebSAndi Kleen
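/*
 * Sketch of querying the area backing a vmalloc address ("addr" is an
 * assumed pointer into vmalloc space):
 *
 *	struct vm_struct *vm = find_vm_area(addr);
 *
 *	if (vm)
 *		pr_debug("%lu bytes mapped at %p\n", vm->size, vm->addr);
 */
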
32151da177e4SLinus Torvalds /**
3216183ff22bSSimon Arlott * remove_vm_area - find and remove a continuous kernel virtual area
32171da177e4SLinus Torvalds * @addr: base address
32181da177e4SLinus Torvalds *
32191da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it.
32201da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe
32217856dfebSAndi Kleen * on SMP machines, except for its size or flags.
3222a862f68aSMike Rapoport *
322374640617SHui Su * Return: the area descriptor on success or %NULL on failure.
32241da177e4SLinus Torvalds */
3225b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr)
32261da177e4SLinus Torvalds {
3227db64fe02SNick Piggin struct vmap_area *va;
322875c59ce7SChristoph Hellwig struct vm_struct *vm;
3229db64fe02SNick Piggin
32305803ed29SChristoph Hellwig might_sleep();
32315803ed29SChristoph Hellwig
323217d3ef43SChristoph Hellwig if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
323317d3ef43SChristoph Hellwig addr))
3234db64fe02SNick Piggin return NULL;
323517d3ef43SChristoph Hellwig
323675c59ce7SChristoph Hellwig va = find_unlink_vmap_area((unsigned long)addr);
323775c59ce7SChristoph Hellwig if (!va || !va->vm)
323875c59ce7SChristoph Hellwig return NULL;
323975c59ce7SChristoph Hellwig vm = va->vm;
324017d3ef43SChristoph Hellwig
324117d3ef43SChristoph Hellwig debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
324217d3ef43SChristoph Hellwig debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
324375c59ce7SChristoph Hellwig kasan_free_module_shadow(vm);
324417d3ef43SChristoph Hellwig kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
324517d3ef43SChristoph Hellwig
324675c59ce7SChristoph Hellwig free_unmap_vmap_area(va);
324775c59ce7SChristoph Hellwig return vm;
32481da177e4SLinus Torvalds }
32491da177e4SLinus Torvalds
3250868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area,
3251868b104dSRick Edgecombe int (*set_direct_map)(struct page *page))
3252868b104dSRick Edgecombe {
3253868b104dSRick Edgecombe int i;
3254868b104dSRick Edgecombe
3255121e6f32SNicholas Piggin /* HUGE_VMALLOC passes small pages to set_direct_map */
3256868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++)
3257868b104dSRick Edgecombe if (page_address(area->pages[i]))
3258868b104dSRick Edgecombe set_direct_map(area->pages[i]);
3259868b104dSRick Edgecombe }
3260868b104dSRick Edgecombe
32619e5fa0aeSChristoph Hellwig /*
32629e5fa0aeSChristoph Hellwig * Flush the vm mapping and reset the direct map.
32639e5fa0aeSChristoph Hellwig */
32649e5fa0aeSChristoph Hellwig static void vm_reset_perms(struct vm_struct *area)
3265868b104dSRick Edgecombe {
3266868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0;
3267121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area);
326831e67340SRick Edgecombe int flush_dmap = 0;
3269868b104dSRick Edgecombe int i;
3270868b104dSRick Edgecombe
3271868b104dSRick Edgecombe /*
32729e5fa0aeSChristoph Hellwig * Find the start and end range of the direct mappings to make sure that
3273868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map.
3274868b104dSRick Edgecombe */
3275121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) {
32768e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]);
32779e5fa0aeSChristoph Hellwig
32788e41f872SRick Edgecombe if (addr) {
3279121e6f32SNicholas Piggin unsigned long page_size;
3280121e6f32SNicholas Piggin
3281121e6f32SNicholas Piggin page_size = PAGE_SIZE << page_order;
3282868b104dSRick Edgecombe start = min(addr, start);
3283121e6f32SNicholas Piggin end = max(addr + page_size, end);
328431e67340SRick Edgecombe flush_dmap = 1;
3285868b104dSRick Edgecombe }
3286868b104dSRick Edgecombe }
3287868b104dSRick Edgecombe
3288868b104dSRick Edgecombe /*
3289868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if
3290868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and
3291868b104dSRick Edgecombe * reset the direct map permissions to the default.
3292868b104dSRick Edgecombe */
3293868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush);
329431e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap);
3295868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush);
3296868b104dSRick Edgecombe }
3297868b104dSRick Edgecombe
3298208162f4SChristoph Hellwig static void delayed_vfree_work(struct work_struct *w)
32991da177e4SLinus Torvalds {
3300208162f4SChristoph Hellwig struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3301208162f4SChristoph Hellwig struct llist_node *t, *llnode;
33021da177e4SLinus Torvalds
3303208162f4SChristoph Hellwig llist_for_each_safe(llnode, t, llist_del_all(&p->list))
33045d3d31d6SChristoph Hellwig vfree(llnode);
3305bf22e37aSAndrey Ryabinin }
3306bf22e37aSAndrey Ryabinin
3307bf22e37aSAndrey Ryabinin /**
3308bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc()
3309bf22e37aSAndrey Ryabinin * @addr: memory base address
3310bf22e37aSAndrey Ryabinin *
3311bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context
3312bf22e37aSAndrey Ryabinin * except NMIs.
3313bf22e37aSAndrey Ryabinin */
3314bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr)
3315bf22e37aSAndrey Ryabinin {
331601e2e839SChristoph Hellwig struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3317bf22e37aSAndrey Ryabinin
331801e2e839SChristoph Hellwig BUG_ON(in_nmi());
3319bf22e37aSAndrey Ryabinin kmemleak_free(addr);
3320bf22e37aSAndrey Ryabinin
332101e2e839SChristoph Hellwig /*
332201e2e839SChristoph Hellwig * Use raw_cpu_ptr() because this can be called from preemptible
332301e2e839SChristoph Hellwig * context. Preemption is absolutely fine here, because the llist_add()
332401e2e839SChristoph Hellwig * implementation is lockless, so it works even if we are adding to
332501e2e839SChristoph Hellwig * another cpu's list. schedule_work() should be fine with this too.
332601e2e839SChristoph Hellwig */
332701e2e839SChristoph Hellwig if (addr && llist_add((struct llist_node *)addr, &p->list))
332801e2e839SChristoph Hellwig schedule_work(&p->wq);
3329c67dc624SRoman Penyaev }
3330c67dc624SRoman Penyaev
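/*
 * Sketch (lock and object are assumptions made up for the example):
 * a caller that must drop a vmalloc'ed buffer while holding a
 * spinlock can use the deferred variant:
 *
 *	spin_lock(&obj->lock);
 *	vfree_atomic(obj->buf);
 *	obj->buf = NULL;
 *	spin_unlock(&obj->lock);
 */
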
33311da177e4SLinus Torvalds /**
3332fa307474SMatthew Wilcox (Oracle) * vfree - Release memory allocated by vmalloc()
3333fa307474SMatthew Wilcox (Oracle) * @addr: Memory base address
33341da177e4SLinus Torvalds *
3335fa307474SMatthew Wilcox (Oracle) * Free the virtually continuous memory area starting at @addr, as obtained
3336fa307474SMatthew Wilcox (Oracle) * from one of the vmalloc() family of APIs. This will usually also free the
3337fa307474SMatthew Wilcox (Oracle) * physical memory underlying the virtual allocation, but that memory is
3338fa307474SMatthew Wilcox (Oracle) * reference counted, so it will not be freed until the last user goes away.
33391da177e4SLinus Torvalds *
3340fa307474SMatthew Wilcox (Oracle) * If @addr is NULL, no operation is performed.
334132fcfd40SAl Viro *
3342fa307474SMatthew Wilcox (Oracle) * Context:
33433ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context.
3344fa307474SMatthew Wilcox (Oracle) * Must not be called in NMI context (strictly speaking, it could be
3345fa307474SMatthew Wilcox (Oracle) * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3346f0953a1bSIngo Molnar * conventions for vfree() arch-dependent would be a really bad idea).
33471da177e4SLinus Torvalds */
3348b3bdda02SChristoph Lameter void vfree(const void *addr)
33491da177e4SLinus Torvalds {
335079311c1fSChristoph Hellwig struct vm_struct *vm;
335179311c1fSChristoph Hellwig int i;
335279311c1fSChristoph Hellwig
335301e2e839SChristoph Hellwig if (unlikely(in_interrupt())) {
335401e2e839SChristoph Hellwig vfree_atomic(addr);
335532fcfd40SAl Viro return;
335601e2e839SChristoph Hellwig }
335701e2e839SChristoph Hellwig
33581da177e4SLinus Torvalds BUG_ON(in_nmi());
335989219d37SCatalin Marinas kmemleak_free(addr);
336001e2e839SChristoph Hellwig might_sleep();
336132fcfd40SAl Viro
3362bf22e37aSAndrey Ryabinin if (!addr)
3363bf22e37aSAndrey Ryabinin return;
3364c67dc624SRoman Penyaev
336579311c1fSChristoph Hellwig vm = remove_vm_area(addr);
336679311c1fSChristoph Hellwig if (unlikely(!vm)) {
336779311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
336879311c1fSChristoph Hellwig addr);
336979311c1fSChristoph Hellwig return;
337079311c1fSChristoph Hellwig }
337179311c1fSChristoph Hellwig
33729e5fa0aeSChristoph Hellwig if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
33739e5fa0aeSChristoph Hellwig vm_reset_perms(vm);
337479311c1fSChristoph Hellwig for (i = 0; i < vm->nr_pages; i++) {
337579311c1fSChristoph Hellwig struct page *page = vm->pages[i];
337679311c1fSChristoph Hellwig
337779311c1fSChristoph Hellwig BUG_ON(!page);
3378a2e740e2SMatthew Wilcox (Oracle) if (!(vm->flags & VM_MAP_PUT_PAGES))
337979311c1fSChristoph Hellwig mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
338079311c1fSChristoph Hellwig /*
338179311c1fSChristoph Hellwig * High-order allocs for huge vmallocs are split, so they
338279311c1fSChristoph Hellwig * can be freed as an array of order-0 allocations.
338379311c1fSChristoph Hellwig */
3384dcc1be11SLorenzo Stoakes __free_page(page);
338579311c1fSChristoph Hellwig cond_resched();
338679311c1fSChristoph Hellwig }
3387a2e740e2SMatthew Wilcox (Oracle) if (!(vm->flags & VM_MAP_PUT_PAGES))
338879311c1fSChristoph Hellwig atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
338979311c1fSChristoph Hellwig kvfree(vm->pages);
339079311c1fSChristoph Hellwig kfree(vm);
33911da177e4SLinus Torvalds }
33921da177e4SLinus Torvalds EXPORT_SYMBOL(vfree);
33931da177e4SLinus Torvalds
33941da177e4SLinus Torvalds /**
33951da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap()
33961da177e4SLinus Torvalds * @addr: memory base address
33971da177e4SLinus Torvalds *
33981da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr,
33991da177e4SLinus Torvalds * which was created from the page array passed to vmap().
34001da177e4SLinus Torvalds *
340180e93effSPekka Enberg * Must not be called in interrupt context.
34021da177e4SLinus Torvalds */
3403b3bdda02SChristoph Lameter void vunmap(const void *addr)
34041da177e4SLinus Torvalds {
340579311c1fSChristoph Hellwig struct vm_struct *vm;
340679311c1fSChristoph Hellwig
34071da177e4SLinus Torvalds BUG_ON(in_interrupt());
340834754b69SPeter Zijlstra might_sleep();
340979311c1fSChristoph Hellwig
341079311c1fSChristoph Hellwig if (!addr)
341179311c1fSChristoph Hellwig return;
341279311c1fSChristoph Hellwig vm = remove_vm_area(addr);
341379311c1fSChristoph Hellwig if (unlikely(!vm)) {
341479311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
341579311c1fSChristoph Hellwig addr);
341679311c1fSChristoph Hellwig return;
341779311c1fSChristoph Hellwig }
341879311c1fSChristoph Hellwig kfree(vm);
34191da177e4SLinus Torvalds }
34201da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap);
34211da177e4SLinus Torvalds
34221da177e4SLinus Torvalds /**
34231da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space
34241da177e4SLinus Torvalds * @pages: array of page pointers
34251da177e4SLinus Torvalds * @count: number of pages to map
34261da177e4SLinus Torvalds * @flags: vm_area->flags
34271da177e4SLinus Torvalds * @prot: page protection for the mapping
34281da177e4SLinus Torvalds *
3429b944afc9SChristoph Hellwig * Maps @count pages from @pages into contiguous kernel virtual space.
3430b944afc9SChristoph Hellwig * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
3431b944afc9SChristoph Hellwig * (which must be kmalloc or vmalloc memory) and one reference per page in it
3432b944afc9SChristoph Hellwig * are transferred from the caller to vmap(), and will be freed / dropped when
3433b944afc9SChristoph Hellwig * vfree() is called on the return value.
3434a862f68aSMike Rapoport *
3435a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure
34361da177e4SLinus Torvalds */
34371da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count,
34381da177e4SLinus Torvalds unsigned long flags, pgprot_t prot)
34391da177e4SLinus Torvalds {
34401da177e4SLinus Torvalds struct vm_struct *area;
3441b67177ecSNicholas Piggin unsigned long addr;
344265ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */
34431da177e4SLinus Torvalds
344434754b69SPeter Zijlstra might_sleep();
344534754b69SPeter Zijlstra
344637f3605eSChristoph Hellwig if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
344737f3605eSChristoph Hellwig return NULL;
344837f3605eSChristoph Hellwig
3449bd1a8fb2SPeter Zijlstra /*
3450bd1a8fb2SPeter Zijlstra * Your top guard is someone else's bottom guard. Not having a top
3451bd1a8fb2SPeter Zijlstra * guard compromises someone else's mappings too.
3452bd1a8fb2SPeter Zijlstra */
3453bd1a8fb2SPeter Zijlstra if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3454bd1a8fb2SPeter Zijlstra flags &= ~VM_NO_GUARD;
3455bd1a8fb2SPeter Zijlstra
3456ca79b0c2SArun KS if (count > totalram_pages())
34571da177e4SLinus Torvalds return NULL;
34581da177e4SLinus Torvalds
345965ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT;
346065ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0));
34611da177e4SLinus Torvalds if (!area)
34621da177e4SLinus Torvalds return NULL;
346323016969SChristoph Lameter
3464b67177ecSNicholas Piggin addr = (unsigned long)area->addr;
3465b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3466b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) {
34671da177e4SLinus Torvalds vunmap(area->addr);
34681da177e4SLinus Torvalds return NULL;
34691da177e4SLinus Torvalds }
34701da177e4SLinus Torvalds
3471c22ee528SMiaohe Lin if (flags & VM_MAP_PUT_PAGES) {
3472b944afc9SChristoph Hellwig area->pages = pages;
3473c22ee528SMiaohe Lin area->nr_pages = count;
3474c22ee528SMiaohe Lin }
34751da177e4SLinus Torvalds return area->addr;
34761da177e4SLinus Torvalds }
34771da177e4SLinus Torvalds EXPORT_SYMBOL(vmap);
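
/*
 * Sketch of a longer-lived mapping via vmap(); "pages" and "nr" are
 * assumptions, with the pages allocated and owned by the caller:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 *
 * Without %VM_MAP_PUT_PAGES, both the page array and the page
 * references remain owned by the caller after vunmap().
 */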
34781da177e4SLinus Torvalds
34793e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN
34803e9a9e25SChristoph Hellwig struct vmap_pfn_data {
34813e9a9e25SChristoph Hellwig unsigned long *pfns;
34823e9a9e25SChristoph Hellwig pgprot_t prot;
34833e9a9e25SChristoph Hellwig unsigned int idx;
34843e9a9e25SChristoph Hellwig };
34853e9a9e25SChristoph Hellwig
34863e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
34873e9a9e25SChristoph Hellwig {
34883e9a9e25SChristoph Hellwig struct vmap_pfn_data *data = private;
3489b3f78e74SRyan Roberts unsigned long pfn = data->pfns[data->idx];
3490b3f78e74SRyan Roberts pte_t ptent;
34913e9a9e25SChristoph Hellwig
3492b3f78e74SRyan Roberts if (WARN_ON_ONCE(pfn_valid(pfn)))
34933e9a9e25SChristoph Hellwig return -EINVAL;
3494b3f78e74SRyan Roberts
3495b3f78e74SRyan Roberts ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3496b3f78e74SRyan Roberts set_pte_at(&init_mm, addr, pte, ptent);
3497b3f78e74SRyan Roberts
3498b3f78e74SRyan Roberts data->idx++;
34993e9a9e25SChristoph Hellwig return 0;
35003e9a9e25SChristoph Hellwig }
35013e9a9e25SChristoph Hellwig
35023e9a9e25SChristoph Hellwig /**
35033e9a9e25SChristoph Hellwig * vmap_pfn - map an array of PFNs into virtually contiguous space
35043e9a9e25SChristoph Hellwig * @pfns: array of PFNs
35053e9a9e25SChristoph Hellwig * @count: number of pages to map
35063e9a9e25SChristoph Hellwig * @prot: page protection for the mapping
35073e9a9e25SChristoph Hellwig *
35083e9a9e25SChristoph Hellwig * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
35093e9a9e25SChristoph Hellwig * the start address of the mapping.
35103e9a9e25SChristoph Hellwig */
35113e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
35123e9a9e25SChristoph Hellwig {
35133e9a9e25SChristoph Hellwig struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
35143e9a9e25SChristoph Hellwig struct vm_struct *area;
35153e9a9e25SChristoph Hellwig
35163e9a9e25SChristoph Hellwig area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
35173e9a9e25SChristoph Hellwig __builtin_return_address(0));
35183e9a9e25SChristoph Hellwig if (!area)
35193e9a9e25SChristoph Hellwig return NULL;
35203e9a9e25SChristoph Hellwig if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
35213e9a9e25SChristoph Hellwig count * PAGE_SIZE, vmap_pfn_apply, &data)) {
35223e9a9e25SChristoph Hellwig free_vm_area(area);
35233e9a9e25SChristoph Hellwig return NULL;
35243e9a9e25SChristoph Hellwig }
3525a50420c7SAlexandre Ghiti
3526a50420c7SAlexandre Ghiti flush_cache_vmap((unsigned long)area->addr,
3527a50420c7SAlexandre Ghiti (unsigned long)area->addr + count * PAGE_SIZE);
3528a50420c7SAlexandre Ghiti
35293e9a9e25SChristoph Hellwig return area->addr;
35303e9a9e25SChristoph Hellwig }
35313e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn);
35323e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */
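
/*
 * Sketch of vmap_pfn() usage; "pfns" and "nr" are assumptions, filled
 * with device PFNs that have no struct page (pfn_valid() must be false
 * for every entry):
 *
 *	void *va = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */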
35333e9a9e25SChristoph Hellwig
353412b9f873SUladzislau Rezki static inline unsigned int
353512b9f873SUladzislau Rezki vm_area_alloc_pages(gfp_t gfp, int nid,
3536343ab817SUladzislau Rezki (Sony) unsigned int order, unsigned int nr_pages, struct page **pages)
353712b9f873SUladzislau Rezki {
353812b9f873SUladzislau Rezki unsigned int nr_allocated = 0;
3539ffb29b1cSChen Wandun struct page *page;
3540ffb29b1cSChen Wandun int i;
354112b9f873SUladzislau Rezki
354212b9f873SUladzislau Rezki /*
354312b9f873SUladzislau Rezki * For order-0 pages we make use of the bulk allocator. If
354412b9f873SUladzislau Rezki * the page array ends up partly populated, or not populated
354512b9f873SUladzislau Rezki * at all, due to failures, fall back to the single page
354612b9f873SUladzislau Rezki * allocator, which is more permissive.
354712b9f873SUladzislau Rezki */
3548c00b6b96SChen Wandun if (!order) {
3549343ab817SUladzislau Rezki (Sony) while (nr_allocated < nr_pages) {
3550343ab817SUladzislau Rezki (Sony) unsigned int nr, nr_pages_request;
3551343ab817SUladzislau Rezki (Sony)
3552343ab817SUladzislau Rezki (Sony) /*
3553343ab817SUladzislau Rezki (Sony) * The maximum allowed request is hard-coded to 100
3554343ab817SUladzislau Rezki (Sony) * pages per call, in order to prevent a long
3555343ab817SUladzislau Rezki (Sony) * preemption-off scenario in the bulk allocator,
3556343ab817SUladzislau Rezki (Sony) * so the request range is [1:100].
3557343ab817SUladzislau Rezki (Sony) */
3558343ab817SUladzislau Rezki (Sony) nr_pages_request = min(100U, nr_pages - nr_allocated);
3559343ab817SUladzislau Rezki (Sony)
3560c00b6b96SChen Wandun /* Memory allocation should honor the mempolicy: we must
3561c00b6b96SChen Wandun * not wrongly use the nearest node when nid == NUMA_NO_NODE,
3562c00b6b96SChen Wandun * otherwise memory may be allocated on only one node even
356398af39d5SYixuan Cao * though the mempolicy wants to interleave allocations.
3564c00b6b96SChen Wandun */
3565c00b6b96SChen Wandun if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
35666bf9b5b4SLuiz Capitulino nr = alloc_pages_bulk_mempolicy_noprof(gfp,
3567c00b6b96SChen Wandun nr_pages_request,
3568c00b6b96SChen Wandun pages + nr_allocated);
3569c00b6b96SChen Wandun else
35706bf9b5b4SLuiz Capitulino nr = alloc_pages_bulk_node_noprof(gfp, nid,
3571c00b6b96SChen Wandun nr_pages_request,
3572c00b6b96SChen Wandun pages + nr_allocated);
3573343ab817SUladzislau Rezki (Sony)
3574343ab817SUladzislau Rezki (Sony) nr_allocated += nr;
3575343ab817SUladzislau Rezki (Sony) cond_resched();
3576343ab817SUladzislau Rezki (Sony)
3577343ab817SUladzislau Rezki (Sony) /*
3578343ab817SUladzislau Rezki (Sony) * If no pages, or only some of them, were obtained,
3579343ab817SUladzislau Rezki (Sony) * fall back to the single page allocator.
3580343ab817SUladzislau Rezki (Sony) */
3581343ab817SUladzislau Rezki (Sony) if (nr != nr_pages_request)
3582343ab817SUladzislau Rezki (Sony) break;
3583343ab817SUladzislau Rezki (Sony) }
35843b8000aeSNicholas Piggin }
358512b9f873SUladzislau Rezki
358612b9f873SUladzislau Rezki /* High-order pages or fallback path if "bulk" fails. */
3587ffb29b1cSChen Wandun while (nr_allocated < nr_pages) {
35887de8728fSUladzislau Rezki (Sony) if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
3589dd544141SVasily Averin break;
3590dd544141SVasily Averin
3591ffb29b1cSChen Wandun if (nid == NUMA_NO_NODE)
35927de8728fSUladzislau Rezki (Sony) page = alloc_pages_noprof(gfp, order);
3593ffb29b1cSChen Wandun else
35947de8728fSUladzislau Rezki (Sony) page = alloc_pages_node_noprof(nid, gfp, order);
35957de8728fSUladzislau Rezki (Sony)
359661ebe5a7SHailong Liu if (unlikely(!page))
359712b9f873SUladzislau Rezki break;
3598e9c3cda4SMichal Hocko
35993b8000aeSNicholas Piggin /*
36006004fe00SUladzislau Rezki (Sony) * Callers must be able to treat high-order allocations
36017de8728fSUladzislau Rezki (Sony) * as independent small pages (as they can with
36023b8000aeSNicholas Piggin * small-page vmallocs). Some drivers do their own refcounting
36033b8000aeSNicholas Piggin * on vmalloc_to_page() pages, some use page->mapping,
36043b8000aeSNicholas Piggin * page->lru, etc.
36053b8000aeSNicholas Piggin */
36063b8000aeSNicholas Piggin if (order)
36073b8000aeSNicholas Piggin split_page(page, order);
360812b9f873SUladzislau Rezki
360912b9f873SUladzislau Rezki /*
361012b9f873SUladzislau Rezki * Careful, we allocate and map page-order pages, but
361112b9f873SUladzislau Rezki * tracking is done per PAGE_SIZE page so as to keep the
361212b9f873SUladzislau Rezki * vm_struct APIs independent of the physical/mapped size.
361312b9f873SUladzislau Rezki */
361412b9f873SUladzislau Rezki for (i = 0; i < (1U << order); i++)
361512b9f873SUladzislau Rezki pages[nr_allocated + i] = page + i;
361612b9f873SUladzislau Rezki
361712b9f873SUladzislau Rezki cond_resched();
361812b9f873SUladzislau Rezki nr_allocated += 1U << order;
361912b9f873SUladzislau Rezki }
362012b9f873SUladzislau Rezki
362112b9f873SUladzislau Rezki return nr_allocated;
362212b9f873SUladzislau Rezki }
362312b9f873SUladzislau Rezki
3624e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3625121e6f32SNicholas Piggin pgprot_t prot, unsigned int page_shift,
3626121e6f32SNicholas Piggin int node)
36271da177e4SLinus Torvalds {
3628930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
36299376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL;
3630121e6f32SNicholas Piggin unsigned long addr = (unsigned long)area->addr;
3631121e6f32SNicholas Piggin unsigned long size = get_vm_area_size(area);
363234fe6537SAndrew Morton unsigned long array_size;
3633121e6f32SNicholas Piggin unsigned int nr_small_pages = size >> PAGE_SHIFT;
3634121e6f32SNicholas Piggin unsigned int page_order;
3635451769ebSMichal Hocko unsigned int flags;
3636451769ebSMichal Hocko int ret;
36371da177e4SLinus Torvalds
3638121e6f32SNicholas Piggin array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
363980b1d8fdSLorenzo Stoakes
3640f255935bSChristoph Hellwig if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3641f255935bSChristoph Hellwig gfp_mask |= __GFP_HIGHMEM;
36421da177e4SLinus Torvalds
36431da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. */
36448757d5faSJan Kiszka if (array_size > PAGE_SIZE) {
364588ae5fb7SKent Overstreet area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
3646f255935bSChristoph Hellwig area->caller);
3647286e1ea3SAndrew Morton } else {
364888ae5fb7SKent Overstreet area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
3649286e1ea3SAndrew Morton }
36507ea36242SAustin Kim
36515c1f4e69SUladzislau Rezki (Sony) if (!area->pages) {
3652c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL,
3653f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to allocated page array size %lu",
3654d70bec8cSNicholas Piggin nr_small_pages * PAGE_SIZE, array_size);
3655cd61413bSUladzislau Rezki (Sony) free_vm_area(area);
36561da177e4SLinus Torvalds return NULL;
36571da177e4SLinus Torvalds }
36581da177e4SLinus Torvalds
3659121e6f32SNicholas Piggin set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3660121e6f32SNicholas Piggin page_order = vm_area_page_order(area);
3661121e6f32SNicholas Piggin
36627de8728fSUladzislau Rezki (Sony) /*
36636004fe00SUladzislau Rezki (Sony) * High-order nofail allocations are really expensive and
36647de8728fSUladzislau Rezki (Sony) * potentially dangerous (premature OOM, disruptive reclaim,
36657de8728fSUladzislau Rezki (Sony) * compaction etc.).
36667de8728fSUladzislau Rezki (Sony) *
36677de8728fSUladzislau Rezki (Sony) * Please note, __vmalloc_node_range_noprof() falls back
36687de8728fSUladzislau Rezki (Sony) * to order-0 pages if the high-order attempt is unsuccessful.
36697de8728fSUladzislau Rezki (Sony) */
36707de8728fSUladzislau Rezki (Sony) area->nr_pages = vm_area_alloc_pages((page_order ?
36717de8728fSUladzislau Rezki (Sony) gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
3672c3d77172SUladzislau Rezki (Sony) node, page_order, nr_small_pages, area->pages);
36735c1f4e69SUladzislau Rezki (Sony)
367497105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
36754e5aa1f4SShakeel Butt if (gfp_mask & __GFP_ACCOUNT) {
36763b8000aeSNicholas Piggin int i;
36774e5aa1f4SShakeel Butt
36783b8000aeSNicholas Piggin for (i = 0; i < area->nr_pages; i++)
36793b8000aeSNicholas Piggin mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
36804e5aa1f4SShakeel Butt }
36815c1f4e69SUladzislau Rezki (Sony)
36825c1f4e69SUladzislau Rezki (Sony) /*
36835c1f4e69SUladzislau Rezki (Sony) * If not enough pages were obtained to satisfy the
3684f41f036bSChristoph Hellwig * allocation request, free whatever was obtained via vfree().
36855c1f4e69SUladzislau Rezki (Sony) */
36865c1f4e69SUladzislau Rezki (Sony) if (area->nr_pages != nr_small_pages) {
368795a301eeSLorenzo Stoakes /*
368895a301eeSLorenzo Stoakes * vm_area_alloc_pages() can fail due to insufficient memory,
368995a301eeSLorenzo Stoakes * but also due to:
369095a301eeSLorenzo Stoakes *
369195a301eeSLorenzo Stoakes * - a pending fatal signal
369295a301eeSLorenzo Stoakes * - insufficient huge page-order pages
369395a301eeSLorenzo Stoakes *
369495a301eeSLorenzo Stoakes * Since we always retry allocations at order-0 in the huge page
369595a301eeSLorenzo Stoakes * case, a warning for either is spurious.
369695a301eeSLorenzo Stoakes */
369795a301eeSLorenzo Stoakes if (!fatal_signal_pending(current) && page_order == 0)
3698c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL,
369995a301eeSLorenzo Stoakes "vmalloc error: size %lu, failed to allocate pages",
370095a301eeSLorenzo Stoakes area->nr_pages * PAGE_SIZE);
37011da177e4SLinus Torvalds goto fail;
37021da177e4SLinus Torvalds }
3703121e6f32SNicholas Piggin
3704451769ebSMichal Hocko /*
3705451769ebSMichal Hocko * page tables allocations ignore external gfp mask, enforce it
3706451769ebSMichal Hocko * by the scope API
3707451769ebSMichal Hocko */
3708451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3709451769ebSMichal Hocko flags = memalloc_nofs_save();
3710451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3711451769ebSMichal Hocko flags = memalloc_noio_save();
3712451769ebSMichal Hocko
37139376130cSMichal Hocko do {
3714451769ebSMichal Hocko ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3715451769ebSMichal Hocko page_shift);
37169376130cSMichal Hocko if (nofail && (ret < 0))
37179376130cSMichal Hocko schedule_timeout_uninterruptible(1);
37189376130cSMichal Hocko } while (nofail && (ret < 0));
3719451769ebSMichal Hocko
3720451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3721451769ebSMichal Hocko memalloc_nofs_restore(flags);
3722451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3723451769ebSMichal Hocko memalloc_noio_restore(flags);
3724451769ebSMichal Hocko
3725451769ebSMichal Hocko if (ret < 0) {
3726c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL,
3727f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to map pages",
3728d70bec8cSNicholas Piggin area->nr_pages * PAGE_SIZE);
37291da177e4SLinus Torvalds goto fail;
3730d70bec8cSNicholas Piggin }
3731ed1f324cSChristoph Hellwig
37321da177e4SLinus Torvalds return area->addr;
37331da177e4SLinus Torvalds
37341da177e4SLinus Torvalds fail:
3735f41f036bSChristoph Hellwig vfree(area->addr);
37361da177e4SLinus Torvalds return NULL;
37371da177e4SLinus Torvalds }
37381da177e4SLinus Torvalds
3739d0a21265SDavid Rientjes /**
3740d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory
3741d0a21265SDavid Rientjes * @size: allocation size
3742d0a21265SDavid Rientjes * @align: desired alignment
3743d0a21265SDavid Rientjes * @start: vm area range start
3744d0a21265SDavid Rientjes * @end: vm area range end
3745d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator
3746d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages
3747cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
374800ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE
3749d0a21265SDavid Rientjes * @caller: caller's return address
3750d0a21265SDavid Rientjes *
3751d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level
3752b7d90e7aSMichal Hocko * allocator with @gfp_mask flags. Please note that the full set of gfp
375330d3f011SMichal Hocko * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
375430d3f011SMichal Hocko * supported.
375530d3f011SMichal Hocko * Zone modifiers are not supported. Among the reclaim modifiers,
375630d3f011SMichal Hocko * __GFP_DIRECT_RECLAIM is required (i.e. GFP_NOWAIT is not supported)
375730d3f011SMichal Hocko * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
375830d3f011SMichal Hocko * __GFP_RETRY_MAYFAIL are not supported).
375930d3f011SMichal Hocko *
376030d3f011SMichal Hocko * __GFP_NOWARN can be used to suppress failure messages.
3761b7d90e7aSMichal Hocko *
3762b7d90e7aSMichal Hocko * Map them into contiguous kernel virtual space, using a pagetable
3763b7d90e7aSMichal Hocko * protection of @prot.
3764a862f68aSMike Rapoport *
3765a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure
3766d0a21265SDavid Rientjes */
376788ae5fb7SKent Overstreet void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
3768d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask,
3769cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node,
3770cb9e3c29SAndrey Ryabinin const void *caller)
3771930fc45aSChristoph Lameter {
3772d0a21265SDavid Rientjes struct vm_struct *area;
377319f1c3acSAndrey Konovalov void *ret;
3774f6e39794SAndrey Konovalov kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3775f0e11a99SLiu Ye unsigned long original_align = align;
3776121e6f32SNicholas Piggin unsigned int shift = PAGE_SHIFT;
3777d0a21265SDavid Rientjes
3778d70bec8cSNicholas Piggin if (WARN_ON_ONCE(!size))
3779d70bec8cSNicholas Piggin return NULL;
3780d70bec8cSNicholas Piggin
3781d70bec8cSNicholas Piggin if ((size >> PAGE_SHIFT) > totalram_pages()) {
3782d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL,
3783f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, exceeds total pages",
3784f0e11a99SLiu Ye size);
3785d70bec8cSNicholas Piggin return NULL;
3786121e6f32SNicholas Piggin }
3787d0a21265SDavid Rientjes
3788559089e0SSong Liu if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3789121e6f32SNicholas Piggin /*
3790121e6f32SNicholas Piggin * Try huge pages. Only try for PAGE_KERNEL allocations,
3791121e6f32SNicholas Piggin * others like modules don't yet expect huge pages in
3792121e6f32SNicholas Piggin * their allocations due to apply_to_page_range not
3793121e6f32SNicholas Piggin * supporting them.
3794121e6f32SNicholas Piggin */
3795121e6f32SNicholas Piggin
3796c82be0beSMike Rapoport (Microsoft) if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
3797121e6f32SNicholas Piggin shift = PMD_SHIFT;
37983382bbeeSChristophe Leroy else
3799c82be0beSMike Rapoport (Microsoft) shift = arch_vmap_pte_supported_shift(size);
38003382bbeeSChristophe Leroy
3801f0e11a99SLiu Ye align = max(original_align, 1UL << shift);
3802121e6f32SNicholas Piggin }
3803121e6f32SNicholas Piggin
3804121e6f32SNicholas Piggin again:
3805f0e11a99SLiu Ye area = __get_vm_area_node(size, align, shift, VM_ALLOC |
38067ca3027bSDaniel Axtens VM_UNINITIALIZED | vm_flags, start, end, node,
38077ca3027bSDaniel Axtens gfp_mask, caller);
3808d70bec8cSNicholas Piggin if (!area) {
38099376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL;
3810d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL,
38119376130cSMichal Hocko "vmalloc error: size %lu, vm_struct allocation failed%s",
3812f0e11a99SLiu Ye size, (nofail) ? ". Retrying." : "");
38139376130cSMichal Hocko if (nofail) {
38149376130cSMichal Hocko schedule_timeout_uninterruptible(1);
38159376130cSMichal Hocko goto again;
38169376130cSMichal Hocko }
3817de7d2b56SJoe Perches goto fail;
3818d70bec8cSNicholas Piggin }
3819d0a21265SDavid Rientjes
3820f6e39794SAndrey Konovalov /*
3821f6e39794SAndrey Konovalov * Prepare arguments for __vmalloc_area_node() and
3822f6e39794SAndrey Konovalov * kasan_unpoison_vmalloc().
3823f6e39794SAndrey Konovalov */
3824f6e39794SAndrey Konovalov if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3825f6e39794SAndrey Konovalov if (kasan_hw_tags_enabled()) {
382601d92c7fSAndrey Konovalov /*
382701d92c7fSAndrey Konovalov * Modify protection bits to allow tagging.
3828f6e39794SAndrey Konovalov * This must be done before mapping.
382901d92c7fSAndrey Konovalov */
383001d92c7fSAndrey Konovalov prot = arch_vmap_pgprot_tagged(prot);
383101d92c7fSAndrey Konovalov
383223689e91SAndrey Konovalov /*
3833f6e39794SAndrey Konovalov * Skip page_alloc poisoning and zeroing for physical
3834f6e39794SAndrey Konovalov * pages backing VM_ALLOC mapping. Memory is instead
3835f6e39794SAndrey Konovalov * poisoned and zeroed by kasan_unpoison_vmalloc().
383623689e91SAndrey Konovalov */
38370a54864fSPeter Collingbourne gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
383823689e91SAndrey Konovalov }
383923689e91SAndrey Konovalov
3840f6e39794SAndrey Konovalov /* Take note that the mapping is PAGE_KERNEL. */
3841f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3842f6e39794SAndrey Konovalov }
3843f6e39794SAndrey Konovalov
384401d92c7fSAndrey Konovalov /* Allocate physical pages and map them into vmalloc space. */
384519f1c3acSAndrey Konovalov ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
384619f1c3acSAndrey Konovalov if (!ret)
3847121e6f32SNicholas Piggin goto fail;
384889219d37SCatalin Marinas
384923689e91SAndrey Konovalov /*
385023689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped.
38516c2f761dSAndrey Konovalov * The condition for setting KASAN_VMALLOC_INIT should complement the
38526c2f761dSAndrey Konovalov * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
38536c2f761dSAndrey Konovalov * to make sure that memory is initialized under the same conditions.
3854f6e39794SAndrey Konovalov * Tag-based KASAN modes only assign tags to normal non-executable
3855f6e39794SAndrey Konovalov * allocations, see __kasan_unpoison_vmalloc().
385623689e91SAndrey Konovalov */
3857f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
38586c2f761dSAndrey Konovalov if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
38596c2f761dSAndrey Konovalov (gfp_mask & __GFP_SKIP_ZERO))
386023689e91SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_INIT;
3861f6e39794SAndrey Konovalov /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3862f0e11a99SLiu Ye area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
386319f1c3acSAndrey Konovalov
386489219d37SCatalin Marinas /*
386520fc02b4SZhang Yanfei * The newly allocated vm_struct had the VM_UNINITIALIZED
386620fc02b4SZhang Yanfei * flag set, meaning that it was not fully initialized.
38674341fa45SJoonsoo Kim * Now it is, so remove the flag here.
3868f5252e00SMitsuo Hayasaka */
386920fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area);
3870f5252e00SMitsuo Hayasaka
387160115fa5SKefeng Wang if (!(vm_flags & VM_DEFER_KMEMLEAK))
3872f0e11a99SLiu Ye kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
387389219d37SCatalin Marinas
387419f1c3acSAndrey Konovalov return area->addr;
3875de7d2b56SJoe Perches
3876de7d2b56SJoe Perches fail:
3877121e6f32SNicholas Piggin if (shift > PAGE_SHIFT) {
3878121e6f32SNicholas Piggin shift = PAGE_SHIFT;
3879f0e11a99SLiu Ye align = original_align;
3880121e6f32SNicholas Piggin goto again;
3881121e6f32SNicholas Piggin }
3882121e6f32SNicholas Piggin
3883de7d2b56SJoe Perches return NULL;
3884930fc45aSChristoph Lameter }
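
/*
 * Sketch of a full-range user, modelled on the classic module_alloc()
 * pattern; MODULES_VADDR/MODULES_END are only available on
 * architectures that define a module area:
 *
 *	p = __vmalloc_node_range(size, MODULE_ALIGN,
 *				 MODULES_VADDR, MODULES_END,
 *				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				 NUMA_NO_NODE, __builtin_return_address(0));
 */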
3885930fc45aSChristoph Lameter
38861da177e4SLinus Torvalds /**
3887930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory
38881da177e4SLinus Torvalds * @size: allocation size
38892dca6999SDavid Miller * @align: desired alignment
38901da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator
389100ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE
3892c85d194bSRandy Dunlap * @caller: caller's return address
38931da177e4SLinus Torvalds *
3894f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with
3895f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space.
3896a7c3e901SMichal Hocko *
3897dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3898a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported
3899a7c3e901SMichal Hocko *
3900a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted
3901a7c3e901SMichal Hocko * with mm people.
3902a862f68aSMike Rapoport *
3903a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
39041da177e4SLinus Torvalds */
390588ae5fb7SKent Overstreet void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
3906f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller)
39071da177e4SLinus Torvalds {
390888ae5fb7SKent Overstreet return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
3909f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller);
39101da177e4SLinus Torvalds }
3911c3f896dcSChristoph Hellwig /*
3912c3f896dcSChristoph Hellwig * This is only for performance analysis and stress testing of vmalloc.
3913c3f896dcSChristoph Hellwig * It is required by the vmalloc test module; do not use it for
3914c3f896dcSChristoph Hellwig * anything else.
3915c3f896dcSChristoph Hellwig */
3916c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE
391788ae5fb7SKent Overstreet EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
3918c3f896dcSChristoph Hellwig #endif
39191da177e4SLinus Torvalds
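/*
 * Illustrative usage sketch (an assumption, not part of the original
 * source): a caller wanting a zeroed, memcg-accounted buffer placed on
 * a given node; "nid" is a hypothetical NUMA node id.
 *
 *	void *buf = __vmalloc_node(16 * PAGE_SIZE, PAGE_SIZE,
 *				   GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT,
 *				   nid, __builtin_return_address(0));
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * Per the kernel-doc above, reclaim modifiers such as __GFP_NORETRY,
 * __GFP_RETRY_MAYFAIL and __GFP_NOFAIL must not be passed in here.
 */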
392088ae5fb7SKent Overstreet void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
3921930fc45aSChristoph Lameter {
392288ae5fb7SKent Overstreet return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
392323016969SChristoph Lameter __builtin_return_address(0));
3924930fc45aSChristoph Lameter }
392588ae5fb7SKent Overstreet EXPORT_SYMBOL(__vmalloc_noprof);
39261da177e4SLinus Torvalds
39271da177e4SLinus Torvalds /**
39281da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory
39291da177e4SLinus Torvalds * @size: allocation size
393092eac168SMike Rapoport *
39311da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level
39321da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space.
39331da177e4SLinus Torvalds *
3934c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
39351da177e4SLinus Torvalds * use __vmalloc() instead.
3936a862f68aSMike Rapoport *
3937a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
39381da177e4SLinus Torvalds */
393988ae5fb7SKent Overstreet void *vmalloc_noprof(unsigned long size)
39401da177e4SLinus Torvalds {
394188ae5fb7SKent Overstreet return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
39424d39d728SChristoph Hellwig __builtin_return_address(0));
39431da177e4SLinus Torvalds }
394488ae5fb7SKent Overstreet EXPORT_SYMBOL(vmalloc_noprof);
39451da177e4SLinus Torvalds
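/*
 * Minimal usage sketch (illustrative only): vmalloc() pairs with
 * vfree(), and array_size() from <linux/overflow.h> guards the size
 * multiplication against overflow.
 *
 *	struct foo *tbl = vmalloc(array_size(nr, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */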
3946930fc45aSChristoph Lameter /**
3947559089e0SSong Liu * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
394815a64f5aSClaudio Imbrenda * @size: allocation size
3949559089e0SSong Liu * @gfp_mask: flags for the page level allocator
395015a64f5aSClaudio Imbrenda *
3951559089e0SSong Liu * Allocate enough pages to cover @size from the page level
395215a64f5aSClaudio Imbrenda * allocator and map them into contiguous kernel virtual space.
3953559089e0SSong Liu * If @size is greater than or equal to PMD_SIZE, allow using
3954559089e0SSong Liu * huge pages for the memory.
395515a64f5aSClaudio Imbrenda *
395615a64f5aSClaudio Imbrenda * Return: pointer to the allocated memory or %NULL on error
395715a64f5aSClaudio Imbrenda */
395888ae5fb7SKent Overstreet void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
395915a64f5aSClaudio Imbrenda {
396088ae5fb7SKent Overstreet return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
3961559089e0SSong Liu gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
396215a64f5aSClaudio Imbrenda NUMA_NO_NODE, __builtin_return_address(0));
396315a64f5aSClaudio Imbrenda }
396488ae5fb7SKent Overstreet EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
396515a64f5aSClaudio Imbrenda
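/*
 * Illustrative sketch: a caller that tolerates huge-page mappings,
 * e.g. a large hash table, opts in explicitly. For sizes below
 * PMD_SIZE the call behaves like an ordinary vmalloc():
 *
 *	table = vmalloc_huge(64UL << 20, GFP_KERNEL);
 */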
396615a64f5aSClaudio Imbrenda /**
3967e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill
3968e1ca7788SDave Young * @size: allocation size
396992eac168SMike Rapoport *
3970e1ca7788SDave Young * Allocate enough pages to cover @size from the page level
3971e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space.
3972e1ca7788SDave Young * The memory allocated is set to zero.
3973e1ca7788SDave Young *
3974e1ca7788SDave Young * For tight control over page level allocator and protection flags
3975e1ca7788SDave Young * use __vmalloc() instead.
3976a862f68aSMike Rapoport *
3977a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
3978e1ca7788SDave Young */
397988ae5fb7SKent Overstreet void *vzalloc_noprof(unsigned long size)
3980e1ca7788SDave Young {
398188ae5fb7SKent Overstreet return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
39824d39d728SChristoph Hellwig __builtin_return_address(0));
3983e1ca7788SDave Young }
398488ae5fb7SKent Overstreet EXPORT_SYMBOL(vzalloc_noprof);
3985e1ca7788SDave Young
3986e1ca7788SDave Young /**
3987ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
398883342314SNick Piggin * @size: allocation size
3989ead04089SRolf Eike Beer *
3990ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace
3991ead04089SRolf Eike Beer * without leaking data.
3992a862f68aSMike Rapoport *
3993a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
399483342314SNick Piggin */
399588ae5fb7SKent Overstreet void *vmalloc_user_noprof(unsigned long size)
399683342314SNick Piggin {
399788ae5fb7SKent Overstreet return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3998bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3999bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE,
400000ef2d2fSDavid Rientjes __builtin_return_address(0));
400183342314SNick Piggin }
400288ae5fb7SKent Overstreet EXPORT_SYMBOL(vmalloc_user_noprof);
400383342314SNick Piggin
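/*
 * Sketch (illustrative): a buffer destined for a later mmap(). The
 * VM_USERMAP flag set by vmalloc_user() is what later makes the area
 * acceptable to remap_vmalloc_range():
 *
 *	buf = vmalloc_user(vma->vm_end - vma->vm_start);
 *	if (!buf)
 *		return -ENOMEM;
 */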
400483342314SNick Piggin /**
4005930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node
4006930fc45aSChristoph Lameter * @size: allocation size
4007d44e0780SRandy Dunlap * @node: numa node
4008930fc45aSChristoph Lameter *
4009930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level
4010930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space.
4011930fc45aSChristoph Lameter *
4012c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
4013930fc45aSChristoph Lameter * use __vmalloc() instead.
4014a862f68aSMike Rapoport *
4015a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
4016930fc45aSChristoph Lameter */
401788ae5fb7SKent Overstreet void *vmalloc_node_noprof(unsigned long size, int node)
4018930fc45aSChristoph Lameter {
401988ae5fb7SKent Overstreet return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
4020f38fcb9cSChristoph Hellwig __builtin_return_address(0));
4021930fc45aSChristoph Lameter }
402288ae5fb7SKent Overstreet EXPORT_SYMBOL(vmalloc_node_noprof);
4023930fc45aSChristoph Lameter
4024e1ca7788SDave Young /**
4025e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill
4026e1ca7788SDave Young * @size: allocation size
4027e1ca7788SDave Young * @node: numa node
4028e1ca7788SDave Young *
4029e1ca7788SDave Young * Allocate enough pages to cover @size from the page level
4030e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space.
4031e1ca7788SDave Young * The memory allocated is set to zero.
4032e1ca7788SDave Young *
4033a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
4034e1ca7788SDave Young */
403588ae5fb7SKent Overstreet void *vzalloc_node_noprof(unsigned long size, int node)
4036e1ca7788SDave Young {
403788ae5fb7SKent Overstreet return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
40384d39d728SChristoph Hellwig __builtin_return_address(0));
4039e1ca7788SDave Young }
404088ae5fb7SKent Overstreet EXPORT_SYMBOL(vzalloc_node_noprof);
4041e1ca7788SDave Young
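/*
 * Sketch (illustrative): placing per-node data on its home node;
 * "pernode" is a hypothetical caller-side table.
 *
 *	for_each_online_node(nid)
 *		pernode[nid] = vzalloc_node(size, nid);
 */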
40423ddc2fefSDanilo Krummrich /**
40433ddc2fefSDanilo Krummrich * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
40443ddc2fefSDanilo Krummrich * @p: object to reallocate memory for
40453ddc2fefSDanilo Krummrich * @size: the size to reallocate
40463ddc2fefSDanilo Krummrich * @flags: the flags for the page level allocator
40473ddc2fefSDanilo Krummrich *
40483ddc2fefSDanilo Krummrich * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
40493ddc2fefSDanilo Krummrich * @p is not a %NULL pointer, the object pointed to is freed.
40503ddc2fefSDanilo Krummrich *
40513ddc2fefSDanilo Krummrich * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
40523ddc2fefSDanilo Krummrich * initial memory allocation, every subsequent call to this API for the same
40533ddc2fefSDanilo Krummrich * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
40543ddc2fefSDanilo Krummrich * __GFP_ZERO is not fully honored by this API.
40553ddc2fefSDanilo Krummrich *
40563ddc2fefSDanilo Krummrich * In any case, the contents of the object pointed to are preserved up to the
40573ddc2fefSDanilo Krummrich * lesser of the new and old sizes.
40583ddc2fefSDanilo Krummrich *
40593ddc2fefSDanilo Krummrich * This function must not be called concurrently with itself or vfree() for the
40603ddc2fefSDanilo Krummrich * same memory allocation.
40613ddc2fefSDanilo Krummrich *
40623ddc2fefSDanilo Krummrich * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
40633ddc2fefSDanilo Krummrich * failure
40643ddc2fefSDanilo Krummrich */
40653ddc2fefSDanilo Krummrich void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
40663ddc2fefSDanilo Krummrich {
4067a0309fafSKees Cook struct vm_struct *vm = NULL;
4068a0309fafSKees Cook size_t alloced_size = 0;
40693ddc2fefSDanilo Krummrich size_t old_size = 0;
40703ddc2fefSDanilo Krummrich void *n;
40713ddc2fefSDanilo Krummrich
40723ddc2fefSDanilo Krummrich if (!size) {
40733ddc2fefSDanilo Krummrich vfree(p);
40743ddc2fefSDanilo Krummrich return NULL;
40753ddc2fefSDanilo Krummrich }
40763ddc2fefSDanilo Krummrich
40773ddc2fefSDanilo Krummrich if (p) {
40783ddc2fefSDanilo Krummrich vm = find_vm_area(p);
40793ddc2fefSDanilo Krummrich if (unlikely(!vm)) {
40803ddc2fefSDanilo Krummrich WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
40813ddc2fefSDanilo Krummrich return NULL;
40823ddc2fefSDanilo Krummrich }
40833ddc2fefSDanilo Krummrich
4084a0309fafSKees Cook alloced_size = get_vm_area_size(vm);
4085a0309fafSKees Cook old_size = vm->requested_size;
4086a0309fafSKees Cook if (WARN(alloced_size < old_size,
4087a0309fafSKees Cook "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
4088a0309fafSKees Cook return NULL;
40893ddc2fefSDanilo Krummrich }
40903ddc2fefSDanilo Krummrich
40913ddc2fefSDanilo Krummrich /*
40923ddc2fefSDanilo Krummrich * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
40933ddc2fefSDanilo Krummrich * would be a good heuristic for when to shrink the vm_area?
40943ddc2fefSDanilo Krummrich */
40953ddc2fefSDanilo Krummrich if (size <= old_size) {
4096*70d1eb03SKees Cook /* Zero out "freed" memory, potentially for future realloc. */
4097*70d1eb03SKees Cook if (want_init_on_free() || want_init_on_alloc(flags))
40983ddc2fefSDanilo Krummrich memset((void *)p + size, 0, old_size - size);
4099a0309fafSKees Cook vm->requested_size = size;
4100d699440fSAndrii Nakryiko kasan_poison_vmalloc(p + size, old_size - size);
41013ddc2fefSDanilo Krummrich return (void *)p;
41023ddc2fefSDanilo Krummrich }
41033ddc2fefSDanilo Krummrich
4104a0309fafSKees Cook /*
4105a0309fafSKees Cook * We already have the bytes available in the allocation; use them.
4106a0309fafSKees Cook */
4107a0309fafSKees Cook if (size <= alloced_size) {
4108a0309fafSKees Cook kasan_unpoison_vmalloc(p + old_size, size - old_size,
4109a0309fafSKees Cook KASAN_VMALLOC_PROT_NORMAL);
4110*70d1eb03SKees Cook /*
4111*70d1eb03SKees Cook * No need to zero memory here, as unused memory will have
4112*70d1eb03SKees Cook * already been zeroed at initial allocation time or during
4113*70d1eb03SKees Cook * realloc shrink time.
4114*70d1eb03SKees Cook */
4115a0309fafSKees Cook vm->requested_size = size;
4116f7a35a3cSKees Cook return (void *)p;
4117a0309fafSKees Cook }
4118a0309fafSKees Cook
41193ddc2fefSDanilo Krummrich /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
41203ddc2fefSDanilo Krummrich n = __vmalloc_noprof(size, flags);
41213ddc2fefSDanilo Krummrich if (!n)
41223ddc2fefSDanilo Krummrich return NULL;
41233ddc2fefSDanilo Krummrich
41243ddc2fefSDanilo Krummrich if (p) {
41253ddc2fefSDanilo Krummrich memcpy(n, p, old_size);
41263ddc2fefSDanilo Krummrich vfree(p);
41273ddc2fefSDanilo Krummrich }
41283ddc2fefSDanilo Krummrich
41293ddc2fefSDanilo Krummrich return n;
41303ddc2fefSDanilo Krummrich }
41313ddc2fefSDanilo Krummrich
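/*
 * Sketch of the __GFP_ZERO contract documented above (illustrative):
 * once an allocation starts life zeroed, every later vrealloc() of it
 * must pass __GFP_ZERO too, or grown regions may be left uninitialized.
 *
 *	buf = vzalloc(old_size);
 *	...
 *	buf = vrealloc(buf, new_size, GFP_KERNEL | __GFP_ZERO);
 */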
41320d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4133698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
41340d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4135698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
41360d08e0d3SAndi Kleen #else
4137698d0831SMichal Hocko /*
4138698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others
4139698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone.
4140698d0831SMichal Hocko */
414168d68ff6SZhiyuan Dai #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
41420d08e0d3SAndi Kleen #endif
41430d08e0d3SAndi Kleen
41441da177e4SLinus Torvalds /**
41451da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
41461da177e4SLinus Torvalds * @size: allocation size
41471da177e4SLinus Torvalds *
41481da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the
41491da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space.
4150a862f68aSMike Rapoport *
4151a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
41521da177e4SLinus Torvalds */
415388ae5fb7SKent Overstreet void *vmalloc_32_noprof(unsigned long size)
41541da177e4SLinus Torvalds {
415588ae5fb7SKent Overstreet return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4156f38fcb9cSChristoph Hellwig __builtin_return_address(0));
41571da177e4SLinus Torvalds }
415888ae5fb7SKent Overstreet EXPORT_SYMBOL(vmalloc_32_noprof);
41591da177e4SLinus Torvalds
416083342314SNick Piggin /**
4161ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
416283342314SNick Piggin * @size: allocation size
4163ead04089SRolf Eike Beer *
4164ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be
4165ead04089SRolf Eike Beer * mapped to userspace without leaking data.
4166a862f68aSMike Rapoport *
4167a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
416883342314SNick Piggin */
416988ae5fb7SKent Overstreet void *vmalloc_32_user_noprof(unsigned long size)
417083342314SNick Piggin {
417188ae5fb7SKent Overstreet return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
4172bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4173bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE,
41745a82ac71SRoman Penyaev __builtin_return_address(0));
417583342314SNick Piggin }
417688ae5fb7SKent Overstreet EXPORT_SYMBOL(vmalloc_32_user_noprof);
417783342314SNick Piggin
4178d0107eb0SKAMEZAWA Hiroyuki /*
41794c91c07cSLorenzo Stoakes * Atomically zero bytes in the iterator.
41804c91c07cSLorenzo Stoakes *
41814c91c07cSLorenzo Stoakes * Returns the number of zeroed bytes.
4182d0107eb0SKAMEZAWA Hiroyuki */
41834c91c07cSLorenzo Stoakes static size_t zero_iter(struct iov_iter *iter, size_t count)
4184d0107eb0SKAMEZAWA Hiroyuki {
41854c91c07cSLorenzo Stoakes size_t remains = count;
4186d0107eb0SKAMEZAWA Hiroyuki
41874c91c07cSLorenzo Stoakes while (remains > 0) {
41884c91c07cSLorenzo Stoakes size_t num, copied;
41894c91c07cSLorenzo Stoakes
41900e4bc271SLu Hongfei num = min_t(size_t, remains, PAGE_SIZE);
41914c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
41924c91c07cSLorenzo Stoakes remains -= copied;
41934c91c07cSLorenzo Stoakes
41944c91c07cSLorenzo Stoakes if (copied < num)
41954c91c07cSLorenzo Stoakes break;
41964c91c07cSLorenzo Stoakes }
41974c91c07cSLorenzo Stoakes
41984c91c07cSLorenzo Stoakes return count - remains;
41994c91c07cSLorenzo Stoakes }
42004c91c07cSLorenzo Stoakes
42014c91c07cSLorenzo Stoakes /*
42024c91c07cSLorenzo Stoakes * Small helper routine: copy contents from addr into the iterator.
42034c91c07cSLorenzo Stoakes * If a page is not present, fill with zeroes.
42044c91c07cSLorenzo Stoakes *
42054c91c07cSLorenzo Stoakes * Returns the number of copied bytes.
42064c91c07cSLorenzo Stoakes */
42074c91c07cSLorenzo Stoakes static size_t aligned_vread_iter(struct iov_iter *iter,
42084c91c07cSLorenzo Stoakes const char *addr, size_t count)
42094c91c07cSLorenzo Stoakes {
42104c91c07cSLorenzo Stoakes size_t remains = count;
42114c91c07cSLorenzo Stoakes struct page *page;
42124c91c07cSLorenzo Stoakes
42134c91c07cSLorenzo Stoakes while (remains > 0) {
4214d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
42154c91c07cSLorenzo Stoakes size_t copied = 0;
4216d0107eb0SKAMEZAWA Hiroyuki
4217891c49abSAlexander Kuleshov offset = offset_in_page(addr);
4218d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
42194c91c07cSLorenzo Stoakes if (length > remains)
42204c91c07cSLorenzo Stoakes length = remains;
42214c91c07cSLorenzo Stoakes page = vmalloc_to_page(addr);
4222d0107eb0SKAMEZAWA Hiroyuki /*
42234c91c07cSLorenzo Stoakes * To do safe access to this _mapped_ area, we need a lock. But
42244c91c07cSLorenzo Stoakes * adding a lock here means that we need to add the overhead of
42254c91c07cSLorenzo Stoakes * vmalloc()/vfree() calls for this _debug_ interface, which is
42264c91c07cSLorenzo Stoakes * rarely used. Instead of that, we'll use a local mapping via
42274c91c07cSLorenzo Stoakes * copy_page_to_iter_nofault() and accept a small overhead in
42284c91c07cSLorenzo Stoakes * this access function.
4229d0107eb0SKAMEZAWA Hiroyuki */
42304c91c07cSLorenzo Stoakes if (page)
42314c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(page, offset,
42324c91c07cSLorenzo Stoakes length, iter);
42334c91c07cSLorenzo Stoakes else
42344c91c07cSLorenzo Stoakes copied = zero_iter(iter, length);
4235d0107eb0SKAMEZAWA Hiroyuki
42364c91c07cSLorenzo Stoakes addr += copied;
42374c91c07cSLorenzo Stoakes remains -= copied;
42384c91c07cSLorenzo Stoakes
42394c91c07cSLorenzo Stoakes if (copied != length)
42404c91c07cSLorenzo Stoakes break;
4241d0107eb0SKAMEZAWA Hiroyuki }
4242d0107eb0SKAMEZAWA Hiroyuki
42434c91c07cSLorenzo Stoakes return count - remains;
42444c91c07cSLorenzo Stoakes }
42454c91c07cSLorenzo Stoakes
42464c91c07cSLorenzo Stoakes /*
42474c91c07cSLorenzo Stoakes * Read from a vm_map_ram region of memory.
42484c91c07cSLorenzo Stoakes *
42494c91c07cSLorenzo Stoakes * Returns the number of copied bytes.
42504c91c07cSLorenzo Stoakes */
42514c91c07cSLorenzo Stoakes static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
42524c91c07cSLorenzo Stoakes size_t count, unsigned long flags)
425306c89946SBaoquan He {
425406c89946SBaoquan He char *start;
425506c89946SBaoquan He struct vmap_block *vb;
4256062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
425706c89946SBaoquan He unsigned long offset;
42584c91c07cSLorenzo Stoakes unsigned int rs, re;
42594c91c07cSLorenzo Stoakes size_t remains, n;
426006c89946SBaoquan He
426106c89946SBaoquan He /*
426206c89946SBaoquan He * If it's an area created directly by the vm_map_ram() interface,
426306c89946SBaoquan He * without further subdividing and delegating management to
426406c89946SBaoquan He * vmap_block, handle it here.
426506c89946SBaoquan He */
42664c91c07cSLorenzo Stoakes if (!(flags & VMAP_BLOCK))
42674c91c07cSLorenzo Stoakes return aligned_vread_iter(iter, addr, count);
42684c91c07cSLorenzo Stoakes
42694c91c07cSLorenzo Stoakes remains = count;
427006c89946SBaoquan He
427106c89946SBaoquan He /*
427206c89946SBaoquan He * The area is split into regions and tracked with vmap_block; read out
427306c89946SBaoquan He * each region and zero-fill the holes between regions.
427406c89946SBaoquan He */
4275fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa((unsigned long) addr);
4276062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
427706c89946SBaoquan He if (!vb)
42784c91c07cSLorenzo Stoakes goto finished_zero;
427906c89946SBaoquan He
428006c89946SBaoquan He spin_lock(&vb->lock);
428106c89946SBaoquan He if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
428206c89946SBaoquan He spin_unlock(&vb->lock);
42834c91c07cSLorenzo Stoakes goto finished_zero;
42844c91c07cSLorenzo Stoakes }
42854c91c07cSLorenzo Stoakes
42864c91c07cSLorenzo Stoakes for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
42874c91c07cSLorenzo Stoakes size_t copied;
42884c91c07cSLorenzo Stoakes
42894c91c07cSLorenzo Stoakes if (remains == 0)
42904c91c07cSLorenzo Stoakes goto finished;
42914c91c07cSLorenzo Stoakes
42924c91c07cSLorenzo Stoakes start = vmap_block_vaddr(vb->va->va_start, rs);
42934c91c07cSLorenzo Stoakes
42944c91c07cSLorenzo Stoakes if (addr < start) {
42954c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, start - addr, remains);
42964c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero);
42974c91c07cSLorenzo Stoakes
42984c91c07cSLorenzo Stoakes addr += zeroed;
42994c91c07cSLorenzo Stoakes remains -= zeroed;
43004c91c07cSLorenzo Stoakes
43014c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero)
430206c89946SBaoquan He goto finished;
430306c89946SBaoquan He }
43044c91c07cSLorenzo Stoakes
430506c89946SBaoquan He /* it could start reading from the middle of a used region */
430606c89946SBaoquan He offset = offset_in_page(addr);
430706c89946SBaoquan He n = ((re - rs + 1) << PAGE_SHIFT) - offset;
43084c91c07cSLorenzo Stoakes if (n > remains)
43094c91c07cSLorenzo Stoakes n = remains;
431006c89946SBaoquan He
43114c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, start + offset, n);
43124c91c07cSLorenzo Stoakes
43134c91c07cSLorenzo Stoakes addr += copied;
43144c91c07cSLorenzo Stoakes remains -= copied;
43154c91c07cSLorenzo Stoakes
43164c91c07cSLorenzo Stoakes if (copied != n)
43174c91c07cSLorenzo Stoakes goto finished;
431806c89946SBaoquan He }
43194c91c07cSLorenzo Stoakes
432006c89946SBaoquan He spin_unlock(&vb->lock);
432106c89946SBaoquan He
43224c91c07cSLorenzo Stoakes finished_zero:
432306c89946SBaoquan He /* zero-fill the remaining dirty or free regions */
43244c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains);
43254c91c07cSLorenzo Stoakes finished:
43264c91c07cSLorenzo Stoakes /* We couldn't copy/zero everything */
43274c91c07cSLorenzo Stoakes spin_unlock(&vb->lock);
43284c91c07cSLorenzo Stoakes return count - remains;
432906c89946SBaoquan He }
433006c89946SBaoquan He
4331d0107eb0SKAMEZAWA Hiroyuki /**
43324c91c07cSLorenzo Stoakes * vread_iter() - read vmalloc area in a safe way to an iterator.
43334c91c07cSLorenzo Stoakes * @iter: the iterator to which data should be written.
4334d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
4335d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read.
4336d0107eb0SKAMEZAWA Hiroyuki *
4337d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
4338d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to the given iterator. If the given memory
4339d0107eb0SKAMEZAWA Hiroyuki * range of [addr...addr+count) includes some valid address, data is
4340d0107eb0SKAMEZAWA Hiroyuki * copied to @iter. If there are memory holes, they'll be zero-filled.
4341d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done.
4342d0107eb0SKAMEZAWA Hiroyuki *
4343d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with an alive
4344a8e5202dSCong Wang * vm_struct area, 0 is returned. @iter should target a kernel buffer.
4345d0107eb0SKAMEZAWA Hiroyuki *
4346d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread_iter() is never necessary because the caller
4347d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy().
4348d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without
4349bbcd53c9SDavid Hildenbrand * any information, such as /proc/kcore.
4350a862f68aSMike Rapoport *
4351a862f68aSMike Rapoport * Return: number of bytes for which addr and the iterator should be
4352a862f68aSMike Rapoport * increased (same number as @count) or %0 if [addr...addr+count)
4353a862f68aSMike Rapoport * doesn't include any intersection with a valid vmalloc area
4354d0107eb0SKAMEZAWA Hiroyuki */
43554c91c07cSLorenzo Stoakes long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
43561da177e4SLinus Torvalds {
4357d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
4358e81ce85fSJoonsoo Kim struct vmap_area *va;
4359e81ce85fSJoonsoo Kim struct vm_struct *vm;
43604c91c07cSLorenzo Stoakes char *vaddr;
43614c91c07cSLorenzo Stoakes size_t n, size, flags, remains;
436253becf32SUladzislau Rezki (Sony) unsigned long next;
43631da177e4SLinus Torvalds
43644aff1dc4SAndrey Konovalov addr = kasan_reset_tag(addr);
43654aff1dc4SAndrey Konovalov
43661da177e4SLinus Torvalds /* Don't allow overflow */
43671da177e4SLinus Torvalds if ((unsigned long) addr + count < count)
43681da177e4SLinus Torvalds count = -(unsigned long) addr;
43691da177e4SLinus Torvalds
43704c91c07cSLorenzo Stoakes remains = count;
43714c91c07cSLorenzo Stoakes
437253becf32SUladzislau Rezki (Sony) vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
437353becf32SUladzislau Rezki (Sony) if (!vn)
43744c91c07cSLorenzo Stoakes goto finished_zero;
4375f181234aSChen Wandun
4376f181234aSChen Wandun /* no intersects with alive vmap_area */
43774c91c07cSLorenzo Stoakes if ((unsigned long)addr + remains <= va->va_start)
43784c91c07cSLorenzo Stoakes goto finished_zero;
4379f181234aSChen Wandun
438053becf32SUladzislau Rezki (Sony) do {
43814c91c07cSLorenzo Stoakes size_t copied;
43824c91c07cSLorenzo Stoakes
43834c91c07cSLorenzo Stoakes if (remains == 0)
43844c91c07cSLorenzo Stoakes goto finished;
4385e81ce85fSJoonsoo Kim
438606c89946SBaoquan He vm = va->vm;
438706c89946SBaoquan He flags = va->flags & VMAP_FLAGS_MASK;
438806c89946SBaoquan He /*
438906c89946SBaoquan He * VMAP_BLOCK indicates a sub-type of vm_map_ram area and needs
439006c89946SBaoquan He * to be set together with VMAP_RAM.
439106c89946SBaoquan He */
439206c89946SBaoquan He WARN_ON(flags == VMAP_BLOCK);
439306c89946SBaoquan He
439406c89946SBaoquan He if (!vm && !flags)
439553becf32SUladzislau Rezki (Sony) goto next_va;
4396e81ce85fSJoonsoo Kim
439730a7a9b1SBaoquan He if (vm && (vm->flags & VM_UNINITIALIZED))
439853becf32SUladzislau Rezki (Sony) goto next_va;
43994c91c07cSLorenzo Stoakes
440030a7a9b1SBaoquan He /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
440130a7a9b1SBaoquan He smp_rmb();
440230a7a9b1SBaoquan He
440306c89946SBaoquan He vaddr = (char *) va->va_start;
440406c89946SBaoquan He size = vm ? get_vm_area_size(vm) : va_size(va);
440506c89946SBaoquan He
440606c89946SBaoquan He if (addr >= vaddr + size)
440753becf32SUladzislau Rezki (Sony) goto next_va;
44084c91c07cSLorenzo Stoakes
44094c91c07cSLorenzo Stoakes if (addr < vaddr) {
44104c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, vaddr - addr, remains);
44114c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero);
44124c91c07cSLorenzo Stoakes
44134c91c07cSLorenzo Stoakes addr += zeroed;
44144c91c07cSLorenzo Stoakes remains -= zeroed;
44154c91c07cSLorenzo Stoakes
44164c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero)
44171da177e4SLinus Torvalds goto finished;
44181da177e4SLinus Torvalds }
44194c91c07cSLorenzo Stoakes
442006c89946SBaoquan He n = vaddr + size - addr;
44214c91c07cSLorenzo Stoakes if (n > remains)
44224c91c07cSLorenzo Stoakes n = remains;
442306c89946SBaoquan He
442406c89946SBaoquan He if (flags & VMAP_RAM)
44254c91c07cSLorenzo Stoakes copied = vmap_ram_vread_iter(iter, addr, n, flags);
4426e6f79822SAlexei Starovoitov else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
44274c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, addr, n);
4428e6f79822SAlexei Starovoitov else /* IOREMAP | SPARSE area is treated as memory hole */
44294c91c07cSLorenzo Stoakes copied = zero_iter(iter, n);
44304c91c07cSLorenzo Stoakes
44314c91c07cSLorenzo Stoakes addr += copied;
44324c91c07cSLorenzo Stoakes remains -= copied;
44334c91c07cSLorenzo Stoakes
44344c91c07cSLorenzo Stoakes if (copied != n)
44354c91c07cSLorenzo Stoakes goto finished;
443653becf32SUladzislau Rezki (Sony)
443753becf32SUladzislau Rezki (Sony) next_va:
443853becf32SUladzislau Rezki (Sony) next = va->va_end;
443953becf32SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
444053becf32SUladzislau Rezki (Sony) } while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
44414c91c07cSLorenzo Stoakes
44424c91c07cSLorenzo Stoakes finished_zero:
444353becf32SUladzislau Rezki (Sony) if (vn)
4444d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
444553becf32SUladzislau Rezki (Sony)
44464c91c07cSLorenzo Stoakes /* zero-fill memory holes */
44474c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains);
44481da177e4SLinus Torvalds finished:
44494c91c07cSLorenzo Stoakes /* Nothing remains, or we couldn't copy/zero everything. */
445053becf32SUladzislau Rezki (Sony) if (vn)
4451d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
4452d0107eb0SKAMEZAWA Hiroyuki
44534c91c07cSLorenzo Stoakes return count - remains;
44541da177e4SLinus Torvalds }
44551da177e4SLinus Torvalds
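/*
 * Sketch of the intended caller pattern, modelled on /proc/kcore
 * (illustrative; "kvec", "kaddr" and "len" are hypothetical):
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, len);
 *	ret = vread_iter(&iter, kaddr, len);
 *
 * Memory holes and IOREMAP/SPARSE ranges come back zero-filled rather
 * than faulting.
 */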
4456d0107eb0SKAMEZAWA Hiroyuki /**
4457e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace
4458e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover
4459e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at
4460e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory
4461bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at
4462e69e9d4aSHATAYAMA Daisuke * @size: size of map area
4463e69e9d4aSHATAYAMA Daisuke *
4464e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure
4465e69e9d4aSHATAYAMA Daisuke *
4466e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area,
4467e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at
4468e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criterion isn't
4469e69e9d4aSHATAYAMA Daisuke * met.
4470e69e9d4aSHATAYAMA Daisuke *
4471e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c)
4472e69e9d4aSHATAYAMA Daisuke */
4473e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4474bdebd6a2SJann Horn void *kaddr, unsigned long pgoff,
4475bdebd6a2SJann Horn unsigned long size)
4476e69e9d4aSHATAYAMA Daisuke {
4477e69e9d4aSHATAYAMA Daisuke struct vm_struct *area;
4478bdebd6a2SJann Horn unsigned long off;
4479bdebd6a2SJann Horn unsigned long end_index;
4480bdebd6a2SJann Horn
4481bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4482bdebd6a2SJann Horn return -EINVAL;
4483e69e9d4aSHATAYAMA Daisuke
4484e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size);
4485e69e9d4aSHATAYAMA Daisuke
4486e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4487e69e9d4aSHATAYAMA Daisuke return -EINVAL;
4488e69e9d4aSHATAYAMA Daisuke
4489e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr);
4490e69e9d4aSHATAYAMA Daisuke if (!area)
4491e69e9d4aSHATAYAMA Daisuke return -EINVAL;
4492e69e9d4aSHATAYAMA Daisuke
4493fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4494e69e9d4aSHATAYAMA Daisuke return -EINVAL;
4495e69e9d4aSHATAYAMA Daisuke
4496bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) ||
4497bdebd6a2SJann Horn end_index > get_vm_area_size(area))
4498e69e9d4aSHATAYAMA Daisuke return -EINVAL;
4499bdebd6a2SJann Horn kaddr += off;
4500e69e9d4aSHATAYAMA Daisuke
4501e69e9d4aSHATAYAMA Daisuke do {
4502e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr);
4503e69e9d4aSHATAYAMA Daisuke int ret;
4504e69e9d4aSHATAYAMA Daisuke
4505e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page);
4506e69e9d4aSHATAYAMA Daisuke if (ret)
4507e69e9d4aSHATAYAMA Daisuke return ret;
4508e69e9d4aSHATAYAMA Daisuke
4509e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE;
4510e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE;
4511e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE;
4512e69e9d4aSHATAYAMA Daisuke } while (size > 0);
4513e69e9d4aSHATAYAMA Daisuke
45141c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4515e69e9d4aSHATAYAMA Daisuke
4516e69e9d4aSHATAYAMA Daisuke return 0;
4517e69e9d4aSHATAYAMA Daisuke }
4518e69e9d4aSHATAYAMA Daisuke
4519e69e9d4aSHATAYAMA Daisuke /**
452083342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace
452183342314SNick Piggin * @vma: vma to cover (map full range of vma)
452283342314SNick Piggin * @addr: vmalloc memory
452383342314SNick Piggin * @pgoff: number of pages into addr before first page to map
45247682486bSRandy Dunlap *
45257682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure
452683342314SNick Piggin *
452783342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and
452883342314SNick Piggin * that it is big enough to cover the vma. Will return failure if
452983342314SNick Piggin * that criterion isn't met.
453083342314SNick Piggin *
453172fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c)
453283342314SNick Piggin */
453383342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
453483342314SNick Piggin unsigned long pgoff)
453583342314SNick Piggin {
4536e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start,
4537bdebd6a2SJann Horn addr, pgoff,
4538e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start);
453983342314SNick Piggin }
454083342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range);
454183342314SNick Piggin
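/*
 * Typical mmap() file-operation pattern (illustrative sketch; "foo" is
 * a hypothetical driver whose buffer was allocated with vmalloc_user()):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return remap_vmalloc_range(vma, foo->buf, 0);
 *	}
 */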
45425f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area)
45435f4352fbSJeremy Fitzhardinge {
45445f4352fbSJeremy Fitzhardinge struct vm_struct *ret;
45455f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr);
45465f4352fbSJeremy Fitzhardinge BUG_ON(ret != area);
45475f4352fbSJeremy Fitzhardinge kfree(area);
45485f4352fbSJeremy Fitzhardinge }
45495f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area);
4550a10aa579SChristoph Lameter
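/*
 * Pairing sketch (illustrative): free_vm_area() releases areas obtained
 * with get_vm_area() and friends once their mapping is no longer used:
 *
 *	area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
 *	if (!area)
 *		return -ENOMEM;
 *	...
 *	free_vm_area(area);
 */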
45514f8b02b4STejun Heo #ifdef CONFIG_SMP
4552ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n)
4553ca23e405STejun Heo {
45544583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node);
4555ca23e405STejun Heo }
4556ca23e405STejun Heo
4557ca23e405STejun Heo /**
455868ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
455968ad4a33SUladzislau Rezki (Sony) * @addr: target address
4560ca23e405STejun Heo *
456168ad4a33SUladzislau Rezki (Sony) * Returns: the vmap_area if it is found. If there is no such area,
456268ad4a33SUladzislau Rezki (Sony) * the first highest (reverse order) vmap_area is returned, i.e. one
456368ad4a33SUladzislau Rezki (Sony) * with va->va_start < addr && va->va_end < addr, or NULL if there
456468ad4a33SUladzislau Rezki (Sony) * are no areas before @addr.
4565ca23e405STejun Heo */
456668ad4a33SUladzislau Rezki (Sony) static struct vmap_area *
456768ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr)
4568ca23e405STejun Heo {
456968ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp;
457068ad4a33SUladzislau Rezki (Sony) struct rb_node *n;
457168ad4a33SUladzislau Rezki (Sony)
457268ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node;
457368ad4a33SUladzislau Rezki (Sony) va = NULL;
4574ca23e405STejun Heo
4575ca23e405STejun Heo while (n) {
457668ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node);
457768ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) {
457868ad4a33SUladzislau Rezki (Sony) va = tmp;
457968ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr)
4580ca23e405STejun Heo break;
4581ca23e405STejun Heo
458268ad4a33SUladzislau Rezki (Sony) n = n->rb_right;
4583ca23e405STejun Heo } else {
458468ad4a33SUladzislau Rezki (Sony) n = n->rb_left;
4585ca23e405STejun Heo }
458668ad4a33SUladzislau Rezki (Sony) }
458768ad4a33SUladzislau Rezki (Sony)
458868ad4a33SUladzislau Rezki (Sony) return va;
4589ca23e405STejun Heo }
4590ca23e405STejun Heo
4591ca23e405STejun Heo /**
459268ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address
459368ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END
459468ad4a33SUladzislau Rezki (Sony) * @va:
459568ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search from (reverse order);
459668ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address.
4597799fa85dSAlex Shi * @align: alignment for required highest address
4598ca23e405STejun Heo *
459968ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area
4600ca23e405STejun Heo */
460168ad4a33SUladzislau Rezki (Sony) static unsigned long
460268ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4603ca23e405STejun Heo {
460468ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4605ca23e405STejun Heo unsigned long addr;
4606ca23e405STejun Heo
460768ad4a33SUladzislau Rezki (Sony) if (likely(*va)) {
460868ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va),
460968ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) {
461068ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
461168ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr)
461268ad4a33SUladzislau Rezki (Sony) return addr;
461368ad4a33SUladzislau Rezki (Sony) }
4614ca23e405STejun Heo }
4615ca23e405STejun Heo
461668ad4a33SUladzislau Rezki (Sony) return 0;
4617ca23e405STejun Heo }
4618ca23e405STejun Heo
4619ca23e405STejun Heo /**
4620ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4621ca23e405STejun Heo * @offsets: array containing offset of each area
4622ca23e405STejun Heo * @sizes: array containing size of each area
4623ca23e405STejun Heo * @nr_vms: the number of areas to allocate
4624ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4625ca23e405STejun Heo *
4626ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4627ca23e405STejun Heo * vm_structs on success, %NULL on failure
4628ca23e405STejun Heo *
4629ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can
4630ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates
4631ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
4632ec3f64fcSDavid Rientjes * be scattered pretty far, with the distance between two areas easily
4633ec3f64fcSDavid Rientjes * going up to gigabytes. To avoid interacting with regular vmallocs,
4634ec3f64fcSDavid Rientjes * these areas are allocated from the top.
4635ca23e405STejun Heo *
4636ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It
463768ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking
463868ad4a33SUladzislau Rezki (Sony) * for a matching base. While scanning, if any of the areas do not fit,
463968ad4a33SUladzislau Rezki (Sony) * the base address is pulled down to fit the area. Scanning is repeated till
464068ad4a33SUladzislau Rezki (Sony) * all the areas fit and then all necessary data structures are inserted
464168ad4a33SUladzislau Rezki (Sony) * and the result is returned.
4642ca23e405STejun Heo */
4643ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4644ca23e405STejun Heo const size_t *sizes, int nr_vms,
4645ec3f64fcSDavid Rientjes size_t align)
4646ca23e405STejun Heo {
4647ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4648ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
464968ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va;
4650ca23e405STejun Heo struct vm_struct **vms;
4651ca23e405STejun Heo int area, area2, last_area, term_area;
4652253a496dSDaniel Axtens unsigned long base, start, size, end, last_end, orig_start, orig_end;
4653ca23e405STejun Heo bool purged = false;
4654ca23e405STejun Heo
4655ca23e405STejun Heo /* verify parameters and allocate data structures */
4656891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4657ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) {
4658ca23e405STejun Heo start = offsets[area];
4659ca23e405STejun Heo end = start + sizes[area];
4660ca23e405STejun Heo
4661ca23e405STejun Heo /* is everything aligned properly? */
4662ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align));
4663ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align));
4664ca23e405STejun Heo
4665ca23e405STejun Heo /* detect the area with the highest address */
4666ca23e405STejun Heo if (start > offsets[last_area])
4667ca23e405STejun Heo last_area = area;
4668ca23e405STejun Heo
4669c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) {
4670ca23e405STejun Heo unsigned long start2 = offsets[area2];
4671ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2];
4672ca23e405STejun Heo
4673c568da28SWei Yang BUG_ON(start2 < end && start < end2);
4674ca23e405STejun Heo }
4675ca23e405STejun Heo }
4676ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area];
4677ca23e405STejun Heo
4678ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) {
4679ca23e405STejun Heo WARN_ON(true);
4680ca23e405STejun Heo return NULL;
4681ca23e405STejun Heo }
4682ca23e405STejun Heo
46834d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
46844d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4685ca23e405STejun Heo if (!vas || !vms)
4686f1db7afdSKautuk Consul goto err_free2;
4687ca23e405STejun Heo
4688ca23e405STejun Heo for (area = 0; area < nr_vms; area++) {
468968ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4690ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4691ca23e405STejun Heo if (!vas[area] || !vms[area])
4692ca23e405STejun Heo goto err_free;
4693ca23e405STejun Heo }
4694ca23e405STejun Heo retry:
4695e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock);
4696ca23e405STejun Heo
4697ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */
4698ca23e405STejun Heo area = term_area = last_area;
4699ca23e405STejun Heo start = offsets[area];
4700ca23e405STejun Heo end = start + sizes[area];
4701ca23e405STejun Heo
470268ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end);
470368ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end;
4704ca23e405STejun Heo
4705ca23e405STejun Heo while (true) {
4706ca23e405STejun Heo /*
4707ca23e405STejun Heo * base might have underflowed, add last_end before
4708ca23e405STejun Heo * comparing.
4709ca23e405STejun Heo */
471068ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end)
471168ad4a33SUladzislau Rezki (Sony) goto overflow;
4712ca23e405STejun Heo
4713ca23e405STejun Heo /*
471468ad4a33SUladzislau Rezki (Sony) * A fitting base has not been found.
4715ca23e405STejun Heo */
471668ad4a33SUladzislau Rezki (Sony) if (va == NULL)
471768ad4a33SUladzislau Rezki (Sony) goto overflow;
4718ca23e405STejun Heo
4719ca23e405STejun Heo /*
4720d8cc323dSQiujun Huang * If required width exceeds current VA block, move
47215336e52cSKuppuswamy Sathyanarayanan * base downwards and then recheck.
47225336e52cSKuppuswamy Sathyanarayanan */
47235336e52cSKuppuswamy Sathyanarayanan if (base + end > va->va_end) {
47245336e52cSKuppuswamy Sathyanarayanan base = pvm_determine_end_from_reverse(&va, align) - end;
47255336e52cSKuppuswamy Sathyanarayanan term_area = area;
47265336e52cSKuppuswamy Sathyanarayanan continue;
47275336e52cSKuppuswamy Sathyanarayanan }
47285336e52cSKuppuswamy Sathyanarayanan
47295336e52cSKuppuswamy Sathyanarayanan /*
473068ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck.
4731ca23e405STejun Heo */
47325336e52cSKuppuswamy Sathyanarayanan if (base + start < va->va_start) {
473368ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node));
473468ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end;
4735ca23e405STejun Heo term_area = area;
4736ca23e405STejun Heo continue;
4737ca23e405STejun Heo }
4738ca23e405STejun Heo
4739ca23e405STejun Heo /*
4740ca23e405STejun Heo * This area fits, move on to the previous one. If
4741ca23e405STejun Heo * the previous one is the terminal one, we're done.
4742ca23e405STejun Heo */
4743ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms;
4744ca23e405STejun Heo if (area == term_area)
4745ca23e405STejun Heo break;
474668ad4a33SUladzislau Rezki (Sony)
4747ca23e405STejun Heo start = offsets[area];
4748ca23e405STejun Heo end = start + sizes[area];
474968ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end);
4750ca23e405STejun Heo }
475168ad4a33SUladzislau Rezki (Sony)
4752ca23e405STejun Heo /* we've found a fitting base, insert all va's */
4753ca23e405STejun Heo for (area = 0; area < nr_vms; area++) {
475468ad4a33SUladzislau Rezki (Sony) int ret;
4755ca23e405STejun Heo
475668ad4a33SUladzislau Rezki (Sony) start = base + offsets[area];
475768ad4a33SUladzislau Rezki (Sony) size = sizes[area];
475868ad4a33SUladzislau Rezki (Sony)
475968ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start);
476068ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL))
476168ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */
476268ad4a33SUladzislau Rezki (Sony) goto recovery;
476368ad4a33SUladzislau Rezki (Sony)
47645b75b8e1SUladzislau Rezki (Sony) ret = va_clip(&free_vmap_area_root,
47655b75b8e1SUladzislau Rezki (Sony) &free_vmap_area_list, va, start, size);
47661b23ff80SBaoquan He if (WARN_ON_ONCE(unlikely(ret)))
476768ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */
476868ad4a33SUladzislau Rezki (Sony) goto recovery;
476968ad4a33SUladzislau Rezki (Sony)
477068ad4a33SUladzislau Rezki (Sony) /* Allocated area. */
477168ad4a33SUladzislau Rezki (Sony) va = vas[area];
477268ad4a33SUladzislau Rezki (Sony) va->va_start = start;
477368ad4a33SUladzislau Rezki (Sony) va->va_end = start + size;
4774ca23e405STejun Heo }
4775ca23e405STejun Heo
4776e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
4777ca23e405STejun Heo
4778253a496dSDaniel Axtens /* populate the kasan shadow space */
4779253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) {
4780253a496dSDaniel Axtens if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4781253a496dSDaniel Axtens goto err_free_shadow;
4782253a496dSDaniel Axtens }
4783253a496dSDaniel Axtens
4784ca23e405STejun Heo /* insert all vm's */
4785e36176beSUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) {
4786d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4787e36176beSUladzislau Rezki (Sony)
4788d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
4789d0936029SUladzislau Rezki (Sony) insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4790aaab830aSrulinhuang setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
4791ca23e405STejun Heo pcpu_get_vm_areas);
4792d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
4793e36176beSUladzislau Rezki (Sony) }
4794ca23e405STejun Heo
479519f1c3acSAndrey Konovalov /*
479619f1c3acSAndrey Konovalov * Mark allocated areas as accessible. Do it now as a best-effort
479719f1c3acSAndrey Konovalov * approach, as they can be mapped outside of vmalloc code.
479823689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for
479923689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
480019f1c3acSAndrey Konovalov */
48011d96320fSAndrey Konovalov for (area = 0; area < nr_vms; area++)
48021d96320fSAndrey Konovalov vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4803f6e39794SAndrey Konovalov vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
48041d96320fSAndrey Konovalov
4805ca23e405STejun Heo kfree(vas);
4806ca23e405STejun Heo return vms;
4807ca23e405STejun Heo
480868ad4a33SUladzislau Rezki (Sony) recovery:
4809e36176beSUladzislau Rezki (Sony) /*
4810e36176beSUladzislau Rezki (Sony) * Remove previously allocated areas. There is no
4811e36176beSUladzislau Rezki (Sony) * need to remove these areas from the busy tree,
4812e36176beSUladzislau Rezki (Sony) * because they are inserted only on the final step
4813e36176beSUladzislau Rezki (Sony) * and only when pcpu_get_vm_areas() succeeds.
4814e36176beSUladzislau Rezki (Sony) */
481568ad4a33SUladzislau Rezki (Sony) while (area--) {
4816253a496dSDaniel Axtens orig_start = vas[area]->va_start;
4817253a496dSDaniel Axtens orig_end = vas[area]->va_end;
481896e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
48193c5c3cfbSDaniel Axtens &free_vmap_area_list);
48209c801f61SUladzislau Rezki (Sony) if (va)
4821253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end,
48229e9e085eSAdrian Huang va->va_start, va->va_end,
48239e9e085eSAdrian Huang KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
482468ad4a33SUladzislau Rezki (Sony) vas[area] = NULL;
482568ad4a33SUladzislau Rezki (Sony) }
482668ad4a33SUladzislau Rezki (Sony)
482768ad4a33SUladzislau Rezki (Sony) overflow:
4828e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
482968ad4a33SUladzislau Rezki (Sony) if (!purged) {
483077e50af0SThomas Gleixner reclaim_and_purge_vmap_areas();
483168ad4a33SUladzislau Rezki (Sony) purged = true;
483268ad4a33SUladzislau Rezki (Sony)
483368ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we recover. */
483468ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) {
483568ad4a33SUladzislau Rezki (Sony) if (vas[area])
483668ad4a33SUladzislau Rezki (Sony) continue;
483768ad4a33SUladzislau Rezki (Sony)
483868ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(
483968ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL);
484068ad4a33SUladzislau Rezki (Sony) if (!vas[area])
484168ad4a33SUladzislau Rezki (Sony) goto err_free;
484268ad4a33SUladzislau Rezki (Sony) }
484368ad4a33SUladzislau Rezki (Sony)
484468ad4a33SUladzislau Rezki (Sony) goto retry;
484568ad4a33SUladzislau Rezki (Sony) }
484668ad4a33SUladzislau Rezki (Sony)
4847ca23e405STejun Heo err_free:
4848ca23e405STejun Heo for (area = 0; area < nr_vms; area++) {
484968ad4a33SUladzislau Rezki (Sony) if (vas[area])
485068ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]);
485168ad4a33SUladzislau Rezki (Sony)
4852ca23e405STejun Heo kfree(vms[area]);
4853ca23e405STejun Heo }
4854f1db7afdSKautuk Consul err_free2:
4855ca23e405STejun Heo kfree(vas);
4856ca23e405STejun Heo kfree(vms);
4857ca23e405STejun Heo return NULL;
4858253a496dSDaniel Axtens
4859253a496dSDaniel Axtens err_free_shadow:
4860253a496dSDaniel Axtens spin_lock(&free_vmap_area_lock);
4861253a496dSDaniel Axtens /*
4862253a496dSDaniel Axtens * We release all the vmalloc shadows, even the ones for regions that
4863253a496dSDaniel Axtens * hadn't been successfully added. This relies on kasan_release_vmalloc
4864253a496dSDaniel Axtens * being able to tolerate this case.
4865253a496dSDaniel Axtens */
4866253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) {
4867253a496dSDaniel Axtens orig_start = vas[area]->va_start;
4868253a496dSDaniel Axtens orig_end = vas[area]->va_end;
486996e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4870253a496dSDaniel Axtens &free_vmap_area_list);
48719c801f61SUladzislau Rezki (Sony) if (va)
4872253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end,
48739e9e085eSAdrian Huang va->va_start, va->va_end,
48749e9e085eSAdrian Huang KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4875253a496dSDaniel Axtens vas[area] = NULL;
4876253a496dSDaniel Axtens kfree(vms[area]);
4877253a496dSDaniel Axtens }
4878253a496dSDaniel Axtens spin_unlock(&free_vmap_area_lock);
4879253a496dSDaniel Axtens kfree(vas);
4880253a496dSDaniel Axtens kfree(vms);
4881253a496dSDaniel Axtens return NULL;
4882ca23e405STejun Heo }
4883ca23e405STejun Heo
4884ca23e405STejun Heo /**
4885ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4886ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4887ca23e405STejun Heo * @nr_vms: the number of allocated areas
4888ca23e405STejun Heo *
4889ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4890ca23e405STejun Heo */
4891ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4892ca23e405STejun Heo {
4893ca23e405STejun Heo int i;
4894ca23e405STejun Heo
4895ca23e405STejun Heo for (i = 0; i < nr_vms; i++)
4896ca23e405STejun Heo free_vm_area(vms[i]);
4897ca23e405STejun Heo kfree(vms);
4898ca23e405STejun Heo }
48994f8b02b4STejun Heo #endif /* CONFIG_SMP */
4900a10aa579SChristoph Lameter
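/*
 * Sketch of the percpu first-chunk usage (illustrative; the only real
 * caller lives in mm/percpu.c):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_groups, atom_size);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr_groups);
 */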
49015bb1bb35SPaul E. McKenney #ifdef CONFIG_PRINTK
490298f18083SPaul E. McKenney bool vmalloc_dump_obj(void *object)
490398f18083SPaul E. McKenney {
49040818e739SJoel Fernandes (Google) const void *caller;
49050818e739SJoel Fernandes (Google) struct vm_struct *vm;
49060818e739SJoel Fernandes (Google) struct vmap_area *va;
4907d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
49080818e739SJoel Fernandes (Google) unsigned long addr;
49090818e739SJoel Fernandes (Google) unsigned int nr_pages;
491098f18083SPaul E. McKenney
49118be4d46eSUladzislau Rezki (Sony) addr = PAGE_ALIGN((unsigned long) object);
49128be4d46eSUladzislau Rezki (Sony) vn = addr_to_node(addr);
4913d0936029SUladzislau Rezki (Sony)
49148be4d46eSUladzislau Rezki (Sony) if (!spin_trylock(&vn->busy.lock))
491598f18083SPaul E. McKenney return false;
4916d0936029SUladzislau Rezki (Sony)
49178be4d46eSUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vn->busy.root);
49188be4d46eSUladzislau Rezki (Sony) if (!va || !va->vm) {
4919d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
49200818e739SJoel Fernandes (Google) return false;
49210818e739SJoel Fernandes (Google) }
49220818e739SJoel Fernandes (Google)
49230818e739SJoel Fernandes (Google) vm = va->vm;
49240818e739SJoel Fernandes (Google) addr = (unsigned long) vm->addr;
49250818e739SJoel Fernandes (Google) caller = vm->caller;
49260818e739SJoel Fernandes (Google) nr_pages = vm->nr_pages;
49278be4d46eSUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
49288be4d46eSUladzislau Rezki (Sony)
4929bd34dcd4SPaul E. McKenney pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
49300818e739SJoel Fernandes (Google) nr_pages, addr, caller);
4931d0936029SUladzislau Rezki (Sony)
493298f18083SPaul E. McKenney return true;
493398f18083SPaul E. McKenney }
49345bb1bb35SPaul E. McKenney #endif
493598f18083SPaul E. McKenney
4936a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
4937a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4938a47a126aSEric Dumazet {
4939e5adfffcSKirill A. Shutemov if (IS_ENABLED(CONFIG_NUMA)) {
4940a47a126aSEric Dumazet unsigned int nr, *counters = m->private;
494151e50b3aSEric Dumazet unsigned int step = 1U << vm_area_page_order(v);
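/*
 * v->pages[] holds one entry per base page. For an order-N mapping,
 * inspecting every 2^N-th entry is enough: all sub-pages of a
 * higher-order page live on the same node, so the whole block is
 * credited to the node of its first page.
 */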
4942a47a126aSEric Dumazet
4943a47a126aSEric Dumazet if (!counters)
4944a47a126aSEric Dumazet return;
4945a47a126aSEric Dumazet
4946af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED)
4947af12346cSWanpeng Li return;
49487e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
49497e5b528bSDmitry Vyukov smp_rmb();
4950af12346cSWanpeng Li
4951a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4952a47a126aSEric Dumazet
495351e50b3aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr += step)
495451e50b3aSEric Dumazet counters[page_to_nid(v->pages[nr])] += step;
4955a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY)
4956a47a126aSEric Dumazet if (counters[nr])
4957a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]);
4958a47a126aSEric Dumazet }
4959a47a126aSEric Dumazet }
4960a47a126aSEric Dumazet
4961dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m)
4962dd3b8353SUladzislau Rezki (Sony) {
4963282631cbSUladzislau Rezki (Sony) struct vmap_node *vn;
4964dd3b8353SUladzislau Rezki (Sony) struct vmap_area *va;
4965282631cbSUladzislau Rezki (Sony) int i;
4966dd3b8353SUladzislau Rezki (Sony)
4967282631cbSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) {
4968282631cbSUladzislau Rezki (Sony) vn = &vmap_nodes[i];
4969282631cbSUladzislau Rezki (Sony)
4970282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock);
4971282631cbSUladzislau Rezki (Sony) list_for_each_entry(va, &vn->lazy.head, list) {
4972dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4973dd3b8353SUladzislau Rezki (Sony) (void *)va->va_start, (void *)va->va_end,
4974b44f71e3SZhangPeng va_size(va));
4975dd3b8353SUladzislau Rezki (Sony) }
4976282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock);
4977282631cbSUladzislau Rezki (Sony) }
4978dd3b8353SUladzislau Rezki (Sony) }
4979dd3b8353SUladzislau Rezki (Sony)
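/*
 * Illustrative (not verbatim) /proc/vmallocinfo lines as produced
 * below; %pK hashes the addresses for unprivileged readers and the
 * size column includes the guard page:
 *
 *   0x(____ptrval____)-0x(____ptrval____)    8192 some_caller+0x34/0x2a0 pages=1 vmalloc N0=1
 *   0x(____ptrval____)-0x(____ptrval____)   20480 another_caller+0x1e4/0x5b0 pages=4 vmalloc N0=4
 *
 * (the caller names above are placeholders, not real symbols)
 */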
49808e1d743fSUladzislau Rezki (Sony) static int vmalloc_info_show(struct seq_file *m, void *p)
4981a10aa579SChristoph Lameter {
4982d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
49833f500069Szijun_hu struct vmap_area *va;
4984d4033afdSJoonsoo Kim struct vm_struct *v;
49858e1d743fSUladzislau Rezki (Sony) int i;
4986d4033afdSJoonsoo Kim
49878e1d743fSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) {
49888e1d743fSUladzislau Rezki (Sony) vn = &vmap_nodes[i];
49893f500069Szijun_hu
49908e1d743fSUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
49918e1d743fSUladzislau Rezki (Sony) list_for_each_entry(va, &vn->busy.head, list) {
4992688fcbfcSPengfei Li if (!va->vm) {
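/*
 * A vmap_area without a vm_struct is either a vm_map_ram()
 * region or an area whose vm_struct has not been attached
 * yet (or is already gone); only vm_map_ram regions are
 * reported here.
 */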
4993bba9697bSBaoquan He if (va->flags & VMAP_RAM)
4994dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
499578c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end,
4996b44f71e3SZhangPeng va_size(va));
499778c72746SYisheng Xie
49988e1d743fSUladzislau Rezki (Sony) continue;
499978c72746SYisheng Xie }
5000d4033afdSJoonsoo Kim
5001d4033afdSJoonsoo Kim v = va->vm;
5002a10aa579SChristoph Lameter
500345ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld",
5004a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size);
5005a10aa579SChristoph Lameter
500662c70bceSJoe Perches if (v->caller)
500762c70bceSJoe Perches seq_printf(m, " %pS", v->caller);
500823016969SChristoph Lameter
5009a10aa579SChristoph Lameter if (v->nr_pages)
5010a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages);
5011a10aa579SChristoph Lameter
5012a10aa579SChristoph Lameter if (v->phys_addr)
5013199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr);
5014a10aa579SChristoph Lameter
5015a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP)
5016f4527c90SFabian Frederick seq_puts(m, " ioremap");
5017a10aa579SChristoph Lameter
5018e6f79822SAlexei Starovoitov if (v->flags & VM_SPARSE)
5019e6f79822SAlexei Starovoitov seq_puts(m, " sparse");
5020e6f79822SAlexei Starovoitov
5021a10aa579SChristoph Lameter if (v->flags & VM_ALLOC)
5022f4527c90SFabian Frederick seq_puts(m, " vmalloc");
5023a10aa579SChristoph Lameter
5024a10aa579SChristoph Lameter if (v->flags & VM_MAP)
5025f4527c90SFabian Frederick seq_puts(m, " vmap");
5026a10aa579SChristoph Lameter
5027a10aa579SChristoph Lameter if (v->flags & VM_USERMAP)
5028f4527c90SFabian Frederick seq_puts(m, " user");
5029a10aa579SChristoph Lameter
5030fe9041c2SChristoph Hellwig if (v->flags & VM_DMA_COHERENT)
5031fe9041c2SChristoph Hellwig seq_puts(m, " dma-coherent");
5032fe9041c2SChristoph Hellwig
5033244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages))
5034f4527c90SFabian Frederick seq_puts(m, " vpages");
5035a10aa579SChristoph Lameter
5036a47a126aSEric Dumazet show_numa_info(m, v);
5037a10aa579SChristoph Lameter seq_putc(m, '\n');
50388e1d743fSUladzislau Rezki (Sony) }
50398e1d743fSUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
50408e1d743fSUladzislau Rezki (Sony) }
5041dd3b8353SUladzislau Rezki (Sony)
5042dd3b8353SUladzislau Rezki (Sony) /*
504396e2db45SUladzislau Rezki (Sony) * As a final step, dump "unpurged" areas.
5044dd3b8353SUladzislau Rezki (Sony) */
5045dd3b8353SUladzislau Rezki (Sony) show_purge_info(m);
5046a10aa579SChristoph Lameter return 0;
5047a10aa579SChristoph Lameter }
5048a10aa579SChristoph Lameter
50495f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void)
50505f6a6a9cSAlexey Dobriyan {
50518e1d743fSUladzislau Rezki (Sony) void *priv_data = NULL;
50528e1d743fSUladzislau Rezki (Sony)
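/*
 * The buffer ends up as the seq_file's private data and is used by
 * show_numa_info() to accumulate per-node page counters.
 */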
5053fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA))
50548e1d743fSUladzislau Rezki (Sony) priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
50558e1d743fSUladzislau Rezki (Sony)
50568e1d743fSUladzislau Rezki (Sony) proc_create_single_data("vmallocinfo",
50578e1d743fSUladzislau Rezki (Sony) 0400, NULL, vmalloc_info_show, priv_data);
50588e1d743fSUladzislau Rezki (Sony)
50595f6a6a9cSAlexey Dobriyan return 0;
50605f6a6a9cSAlexey Dobriyan }
50615f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init);
5062db3808c1SJoonsoo Kim
5063a10aa579SChristoph Lameter #endif
5064208162f4SChristoph Hellwig
5065d0936029SUladzislau Rezki (Sony) static void __init vmap_init_free_space(void)
50667fa8cee0SUladzislau Rezki (Sony) {
50677fa8cee0SUladzislau Rezki (Sony) unsigned long vmap_start = 1;
50687fa8cee0SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX;
5069d0936029SUladzislau Rezki (Sony) struct vmap_area *free;
5070d0936029SUladzislau Rezki (Sony) struct vm_struct *busy;
50717fa8cee0SUladzislau Rezki (Sony)
50727fa8cee0SUladzislau Rezki (Sony) /*
50737fa8cee0SUladzislau Rezki (Sony) * B - a busy area imported from vmlist, F - a free gap to register:
50747fa8cee0SUladzislau Rezki (Sony) *     B     F     B     B     B     F
50757fa8cee0SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|-
50767fa8cee0SUladzislau Rezki (Sony) *  |<---------- The KVA space --------->|
50777fa8cee0SUladzislau Rezki (Sony) */
5078d0936029SUladzislau Rezki (Sony) for (busy = vmlist; busy; busy = busy->next) {
5079d0936029SUladzislau Rezki (Sony) if ((unsigned long) busy->addr > vmap_start) {
50807fa8cee0SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
50817fa8cee0SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) {
50827fa8cee0SUladzislau Rezki (Sony) free->va_start = vmap_start;
5083d0936029SUladzislau Rezki (Sony) free->va_end = (unsigned long) busy->addr;
50847fa8cee0SUladzislau Rezki (Sony)
50857fa8cee0SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL,
50867fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_root,
50877fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_list);
50887fa8cee0SUladzislau Rezki (Sony) }
50897fa8cee0SUladzislau Rezki (Sony) }
50907fa8cee0SUladzislau Rezki (Sony)
5091d0936029SUladzislau Rezki (Sony) vmap_start = (unsigned long) busy->addr + busy->size;
50927fa8cee0SUladzislau Rezki (Sony) }
50937fa8cee0SUladzislau Rezki (Sony)
50947fa8cee0SUladzislau Rezki (Sony) if (vmap_end > vmap_start) {
50957fa8cee0SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
50967fa8cee0SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) {
50977fa8cee0SUladzislau Rezki (Sony) free->va_start = vmap_start;
50987fa8cee0SUladzislau Rezki (Sony) free->va_end = vmap_end;
50997fa8cee0SUladzislau Rezki (Sony)
51007fa8cee0SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL,
51017fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_root,
51027fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_list);
51037fa8cee0SUladzislau Rezki (Sony) }
51047fa8cee0SUladzislau Rezki (Sony) }
51057fa8cee0SUladzislau Rezki (Sony) }
51067fa8cee0SUladzislau Rezki (Sony)
5107d0936029SUladzislau Rezki (Sony) static void vmap_init_nodes(void)
5108d0936029SUladzislau Rezki (Sony) {
5109d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
51108f33a2ffSUladzislau Rezki (Sony) int i, n;
5111d0936029SUladzislau Rezki (Sony)
51128f33a2ffSUladzislau Rezki (Sony) #if BITS_PER_LONG == 64
511315e02a39SUladzislau Rezki (Sony) /*
511415e02a39SUladzislau Rezki (Sony) * The maximum number of nodes is capped at 128, so the scale
511515e02a39SUladzislau Rezki (Sony) * factor is one node per CPU on any system with at most 128
511615e02a39SUladzislau Rezki (Sony) * possible CPUs.
511715e02a39SUladzislau Rezki (Sony) *
511815e02a39SUladzislau Rezki (Sony) * As for NUMA-aware nodes: on bigger systems, for example
511915e02a39SUladzislau Rezki (Sony) * multi-socket NUMA machines that can end up with thousands
512015e02a39SUladzislau Rezki (Sony) * of cores in total, "sub-NUMA clustering" should be added.
512115e02a39SUladzislau Rezki (Sony) *
512215e02a39SUladzislau Rezki (Sony) * In that case a NUMA domain is treated as a single entity
512315e02a39SUladzislau Rezki (Sony) * with dedicated sub-nodes in it, each describing one group
512415e02a39SUladzislau Rezki (Sony) * or set of cores. Per-domain purging and per-domain
512515e02a39SUladzislau Rezki (Sony) * balancing would then have to be added as well.
512615e02a39SUladzislau Rezki (Sony) */
51278f33a2ffSUladzislau Rezki (Sony) n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
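/*
 * E.g. a 64-CPU system gets 64 nodes, while anything with 128 or
 * more possible CPUs is capped at 128 nodes.
 */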
51288f33a2ffSUladzislau Rezki (Sony)
51298f33a2ffSUladzislau Rezki (Sony) if (n > 1) {
51308f33a2ffSUladzislau Rezki (Sony) vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
51318f33a2ffSUladzislau Rezki (Sony) if (vn) {
51328f33a2ffSUladzislau Rezki (Sony) /* The KVA space is striped over nodes in 16-page zones. */
51338f33a2ffSUladzislau Rezki (Sony) vmap_zone_size = (1 << 4) * PAGE_SIZE;
51348f33a2ffSUladzislau Rezki (Sony) nr_vmap_nodes = n;
51358f33a2ffSUladzislau Rezki (Sony) vmap_nodes = vn;
51368f33a2ffSUladzislau Rezki (Sony) } else {
51378f33a2ffSUladzislau Rezki (Sony) pr_err("Failed to allocate the vmap-node array, disabling the node layer\n");
51388f33a2ffSUladzislau Rezki (Sony) }
51398f33a2ffSUladzislau Rezki (Sony) }
51408f33a2ffSUladzislau Rezki (Sony) #endif
51418f33a2ffSUladzislau Rezki (Sony)
51428f33a2ffSUladzislau Rezki (Sony) for (n = 0; n < nr_vmap_nodes; n++) {
51438f33a2ffSUladzislau Rezki (Sony) vn = &vmap_nodes[n];
5144d0936029SUladzislau Rezki (Sony) vn->busy.root = RB_ROOT;
5145d0936029SUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->busy.head);
5146d0936029SUladzislau Rezki (Sony) spin_lock_init(&vn->busy.lock);
5147282631cbSUladzislau Rezki (Sony)
5148282631cbSUladzislau Rezki (Sony) vn->lazy.root = RB_ROOT;
5149282631cbSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->lazy.head);
5150282631cbSUladzislau Rezki (Sony) spin_lock_init(&vn->lazy.lock);
515172210662SUladzislau Rezki (Sony)
51528f33a2ffSUladzislau Rezki (Sony) for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
51538f33a2ffSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->pool[i].head);
51548f33a2ffSUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, 0);
515572210662SUladzislau Rezki (Sony) }
515672210662SUladzislau Rezki (Sony)
515772210662SUladzislau Rezki (Sony) spin_lock_init(&vn->pool_lock);
5158d0936029SUladzislau Rezki (Sony) }
5159d0936029SUladzislau Rezki (Sony) }
5160d0936029SUladzislau Rezki (Sony)
51617679ba6bSUladzislau Rezki (Sony) static unsigned long
51627679ba6bSUladzislau Rezki (Sony) vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
51637679ba6bSUladzislau Rezki (Sony) {
51647679ba6bSUladzislau Rezki (Sony) unsigned long count;
51657679ba6bSUladzislau Rezki (Sony) struct vmap_node *vn;
51667679ba6bSUladzislau Rezki (Sony) int i, j;
51677679ba6bSUladzislau Rezki (Sony)
51687679ba6bSUladzislau Rezki (Sony) for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
51697679ba6bSUladzislau Rezki (Sony) vn = &vmap_nodes[i];
51707679ba6bSUladzislau Rezki (Sony)
51717679ba6bSUladzislau Rezki (Sony) for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
51727679ba6bSUladzislau Rezki (Sony) count += READ_ONCE(vn->pool[j].len);
51737679ba6bSUladzislau Rezki (Sony) }
51747679ba6bSUladzislau Rezki (Sony)
51757679ba6bSUladzislau Rezki (Sony) return count ? count : SHRINK_EMPTY;
51767679ba6bSUladzislau Rezki (Sony) }
51777679ba6bSUladzislau Rezki (Sony)
51787679ba6bSUladzislau Rezki (Sony) static unsigned long
51797679ba6bSUladzislau Rezki (Sony) vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
51807679ba6bSUladzislau Rezki (Sony) {
51817679ba6bSUladzislau Rezki (Sony) int i;
51827679ba6bSUladzislau Rezki (Sony)
51837679ba6bSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++)
51847679ba6bSUladzislau Rezki (Sony) decay_va_pool_node(&vmap_nodes[i], true);
51857679ba6bSUladzislau Rezki (Sony)
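/*
 * decay_va_pool_node() with the "full" argument set drains the pools
 * completely, so one scan pass reclaims all there is; returning
 * SHRINK_STOP tells the shrinker core not to call us again this round.
 */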
51867679ba6bSUladzislau Rezki (Sony) return SHRINK_STOP;
51877679ba6bSUladzislau Rezki (Sony) }
51887679ba6bSUladzislau Rezki (Sony)
5189208162f4SChristoph Hellwig void __init vmalloc_init(void)
5190208162f4SChristoph Hellwig {
51917679ba6bSUladzislau Rezki (Sony) struct shrinker *vmap_node_shrinker;
5192208162f4SChristoph Hellwig struct vmap_area *va;
5193d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
5194208162f4SChristoph Hellwig struct vm_struct *tmp;
5195208162f4SChristoph Hellwig int i;
5196208162f4SChristoph Hellwig
5197208162f4SChristoph Hellwig /*
5198208162f4SChristoph Hellwig * Create the cache for vmap_area objects.
5199208162f4SChristoph Hellwig */
5200208162f4SChristoph Hellwig vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5201208162f4SChristoph Hellwig
5202208162f4SChristoph Hellwig for_each_possible_cpu(i) {
5203208162f4SChristoph Hellwig struct vmap_block_queue *vbq;
5204208162f4SChristoph Hellwig struct vfree_deferred *p;
5205208162f4SChristoph Hellwig
5206208162f4SChristoph Hellwig vbq = &per_cpu(vmap_block_queue, i);
5207208162f4SChristoph Hellwig spin_lock_init(&vbq->lock);
5208208162f4SChristoph Hellwig INIT_LIST_HEAD(&vbq->free);
5209208162f4SChristoph Hellwig p = &per_cpu(vfree_deferred, i);
5210208162f4SChristoph Hellwig init_llist_head(&p->list);
5211208162f4SChristoph Hellwig INIT_WORK(&p->wq, delayed_vfree_work);
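/* Per-CPU xarray used to look up this CPU's vmap blocks. */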
5212062eacf5SUladzislau Rezki (Sony) xa_init(&vbq->vmap_blocks);
5213208162f4SChristoph Hellwig }
5214208162f4SChristoph Hellwig
5215d0936029SUladzislau Rezki (Sony) /*
5216d0936029SUladzislau Rezki (Sony) * Setup nodes before importing vmlist.
5217d0936029SUladzislau Rezki (Sony) */
5218d0936029SUladzislau Rezki (Sony) vmap_init_nodes();
5219d0936029SUladzislau Rezki (Sony)
5220208162f4SChristoph Hellwig /* Import existing vmlist entries. */
5221208162f4SChristoph Hellwig for (tmp = vmlist; tmp; tmp = tmp->next) {
5222208162f4SChristoph Hellwig va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5223208162f4SChristoph Hellwig if (WARN_ON_ONCE(!va))
5224208162f4SChristoph Hellwig continue;
5225208162f4SChristoph Hellwig
5226208162f4SChristoph Hellwig va->va_start = (unsigned long)tmp->addr;
5227208162f4SChristoph Hellwig va->va_end = va->va_start + tmp->size;
5228208162f4SChristoph Hellwig va->vm = tmp;
5229d0936029SUladzislau Rezki (Sony)
5230d0936029SUladzislau Rezki (Sony) vn = addr_to_node(va->va_start);
5231d0936029SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5232208162f4SChristoph Hellwig }
5233208162f4SChristoph Hellwig
5234208162f4SChristoph Hellwig /*
5235208162f4SChristoph Hellwig * Now we can initialize a free vmap space.
5236208162f4SChristoph Hellwig */
5237208162f4SChristoph Hellwig vmap_init_free_space();
5238208162f4SChristoph Hellwig vmap_initialized = true;
52397679ba6bSUladzislau Rezki (Sony)
52407679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
52417679ba6bSUladzislau Rezki (Sony) if (!vmap_node_shrinker) {
52427679ba6bSUladzislau Rezki (Sony) pr_err("Failed to allocate vmap-node shrinker!\n");
52437679ba6bSUladzislau Rezki (Sony) return;
52447679ba6bSUladzislau Rezki (Sony) }
52457679ba6bSUladzislau Rezki (Sony)
52467679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker->count_objects = vmap_node_shrink_count;
52477679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
52487679ba6bSUladzislau Rezki (Sony) shrinker_register(vmap_node_shrinker);
5249208162f4SChristoph Hellwig }
5250