/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..31] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two fields can share storage in a union, because
	 * a vmap_area object is always in exactly one of two trees:
	 * 1) the "free" tree (root is free_vmap_area_root)
	 * 2) the "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif
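/*
 * Illustrative sketch (not part of this header): an architecture that
 * selects HAVE_ARCH_HUGE_VMAP overrides the generic fallbacks above from
 * its <asm/vmalloc.h>, defining a macro of the same name so the #ifndef
 * guard skips the stub. cpu_has_2mb_pages() below is a hypothetical
 * capability check, not a real kernel helper.
 */
#if 0
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	/* Permit PMD-sized huge mappings only when the hardware supports them. */
	return cpu_has_2mb_pages();
}
#endif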
/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_no_huge(unsigned long size) __alloc_size(1);

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED
 * values to let the generic vmalloc and ioremap code know when
 * arch_sync_kernel_mappings() needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings().
 * Instead, the compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
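/*
 * Usage sketch (illustrative only, not part of this header): the common
 * driver-side pattern for the high-level API above. my_buf, my_init(),
 * my_exit() and my_mmap() are hypothetical names.
 */
#if 0
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

static void *my_buf;	/* allocated with vmalloc_user() so it has VM_USERMAP */

static int my_init(void)
{
	/* Virtually contiguous and zero-filled; may sleep, so never
	 * allocate from atomic context. */
	my_buf = vmalloc_user(SZ_1M);
	if (!my_buf)
		return -ENOMEM;
	return 0;
}

static void my_exit(void)
{
	vfree(my_buf);		/* vfree(NULL) is a no-op */
}

/* Hand the buffer out to user space from the driver's ->mmap hook;
 * remap_vmalloc_range() requires a VM_USERMAP area such as the one
 * vmalloc_user() creates. */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, my_buf, 0);
}
#endif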
/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% accuracy whether the area is mapped
	 * with > PAGE_SIZE page table entries: nothing prevents an
	 * architecture from indicating that larger sizes are available
	 * but then deciding not to use them. This only indicates the size
	 * of the physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */
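/*
 * Illustrative sketch (not part of this header): how the low-level helpers
 * above relate to an ordinary vmalloc() allocation. Unless VM_NO_GUARD is
 * set, the area includes one extra guard page, which get_vm_area_size()
 * subtracts. inspect_area() is a hypothetical helper.
 */
#if 0
#include <linux/vmalloc.h>
#include <linux/printk.h>

static void inspect_area(void)
{
	void *p = vmalloc(3 * PAGE_SIZE);
	struct vm_struct *area;

	if (!p)
		return;

	area = find_vm_area(p);
	/* area->size is 4 * PAGE_SIZE here (3 pages + 1 guard page);
	 * get_vm_area_size() reports the usable 3 * PAGE_SIZE. */
	pr_info("usable bytes: %zu\n", get_vm_area_size(area));

	vfree(p);
}
#endif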