/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..31] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;	/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif
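/*
 * Illustrative sketch (not part of the header proper): an architecture that
 * selects HAVE_ARCH_HUGE_VMAP overrides one of the fallbacks above by
 * defining both the macro and the function in its asm/vmalloc.h, so the
 * generic "return false" stub is never emitted. The cpu_supports_leaf_pmd()
 * check below is a hypothetical helper used purely for illustration.
 */
#if 0	/* example only; would live in arch/<arch>/include/asm/vmalloc.h */
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	/* allow PMD-sized vmalloc mappings only when the CPU supports them */
	return cpu_supports_leaf_pmd();
}
#endif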
#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). It
 * relies on the compiler to optimize out the calls when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
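/*
 * Minimal usage sketch for the high-level API above (example only, not part
 * of this header): a driver exposes a vmalloc_user() buffer to userspace
 * from its ->mmap handler via remap_vmalloc_range(). vmalloc_user() returns
 * zeroed memory marked VM_USERMAP, which remap_vmalloc_range() requires.
 * example_buf and example_mmap() are hypothetical names for illustration.
 */
#if 0	/* example only */
/* example_buf is assumed to have been set up with vmalloc_user() earlier */
static void *example_buf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!example_buf)
		return -ENOMEM;

	/* remap_vmalloc_range() rejects buffers lacking VM_USERMAP (-EINVAL) */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif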
/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% accuracy whether the area is mapped
	 * with > PAGE_SIZE page table entries: if the architecture indicates
	 * larger sizes are available but decides not to use them, nothing
	 * prevents that. This only indicates the size of the physical page
	 * allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */