/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/*
 * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
 * vfree_atomic().
 */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* Reset direct map and flush TLB on unmap */
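
/*
 * A minimal usage sketch for VM_FLUSH_RESET_PERMS, not taken from this
 * header: MODULES_VADDR/MODULES_END, PAGE_KERNEL_EXEC and the
 * set_memory_ro()/set_memory_x() helpers are arch-specific and assumed
 * to be available.
 *
 *	buf = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 *				   GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				   NUMA_NO_NODE,
 *				   __builtin_return_address(0));
 *	if (buf) {
 *		set_vm_flush_reset_perms(buf);
 *		... fill buf, then make it read-only and executable with
 *		    set_memory_ro()/set_memory_x() ...
 *	}
 *	...
 *	vfree(buf);	... may sleep; never from interrupt context ...
 */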

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
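
/*
 * For a vmalloc() allocation, @addr is the start of the virtually
 * contiguous mapping, @pages holds the @nr_pages backing struct pages
 * (not necessarily physically contiguous), and @size includes the
 * trailing guard page unless VM_NO_GUARD is set.  A sketch only,
 * assuming @buf is a valid vmalloc() address:
 *
 *	struct vm_struct *area = find_vm_area(buf);
 *
 *	if (area && (area->flags & VM_ALLOC))
 *		pr_debug("%u pages mapped at %p\n", area->nr_pages, area->addr);
 */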

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	/*
	 * Largest available free size in subtree.
	 */
	unsigned long subtree_max_size;
	unsigned long flags;
	struct rb_node rb_node;         /* address sorted rbtree */
	struct list_head list;          /* address sorted list */
	struct llist_node purge_list;    /* "lazy purge" list */
	struct vm_struct *vm;
};

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
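
/*
 * A minimal vm_map_ram()/vm_unmap_ram() sketch; @pages and @nr are
 * hypothetical caller state, NUMA_NO_NODE comes from <linux/numa.h>
 * and PAGE_KERNEL from the arch pgtable definitions:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	... access the pages through the contiguous mapping at va ...
 *	vm_unmap_ram(va, nr);
 */
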
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
						gfp_t flags, void *caller)
{
	return __vmalloc_node_flags(size, node, flags);
}
#else
extern void *__vmalloc_node_flags_caller(unsigned long size,
					 int node, gfp_t flags, void *caller);
#endif

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
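
/*
 * Typical driver usage is plain vmalloc()/vzalloc() paired with vfree().
 * A sketch only; struct foo and @nents are hypothetical, and
 * array_size() comes from <linux/overflow.h>, included above:
 *
 *	struct foo *tbl = vzalloc(array_size(nents, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */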

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
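
/*
 * vmap() builds a virtually contiguous mapping over caller-provided
 * pages.  A sketch only; @pages and @npages are hypothetical caller
 * state:
 *
 *	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */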

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
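
/*
 * remap_vmalloc_range() is what an mmap() handler uses to expose a
 * vmalloc_user() buffer to userspace.  A sketch only; foo_mmap() and
 * foo->buf are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo->buf, 0);
 *	}
 */
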
void vmalloc_sync_all(void);

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
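
/*
 * Example: a 4-page vmalloc() on a system with 4 KiB pages records
 * area->size == 5 * PAGE_SIZE (four data pages plus the guard page),
 * so get_vm_area_size() returns 4 * PAGE_SIZE; with VM_NO_GUARD set,
 * no guard page is reserved and the two values are equal.
 */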

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page **pages);
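
/*
 * The usual pattern is to reserve a region with get_vm_area() and then
 * populate it with map_vm_area(), which is essentially what vmap()
 * does internally.  A sketch only; @pages and @size are hypothetical
 * caller state:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_MAP);
 *
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, pages)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return area->addr;
 */
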
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
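
/*
 * A driver that caches its own vmap ranges can register for "lazy
 * purge" pressure.  A sketch only; foo_vmap_purge() and
 * foo_drop_vmap_cache() are hypothetical, NOTIFY_DONE comes from
 * <linux/notifier.h>:
 *
 *	static int foo_vmap_purge(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		foo_drop_vmap_cache();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_vmap_nb = {
 *		.notifier_call = foo_vmap_purge,
 *	};
 *	...
 *	register_vmap_purge_notifier(&foo_vmap_nb);
 */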

#endif /* _LINUX_VMALLOC_H */