/* xref: /linux-6.15/include/linux/vmalloc.h (revision 4f4ddad3) */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <asm/pgtable.h>	/* PAGE_KERNEL */
#include <linux/rbtree.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by an arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
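
/*
 * Example (illustrative sketch, not taken from any real architecture): an
 * arch overrides the cap above by providing its own definition before this
 * header is seen, e.g. in its <asm/pgtable.h>:
 *
 *	#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
 *
 * which would limit ioremap() alignment to one PMD-sized mapping.
 */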

struct vm_struct {
	struct vm_struct	*next;		/* linked list of areas */
	void			*addr;		/* base virtual address */
	unsigned long		size;		/* size in bytes, including any guard page */
	unsigned long		flags;		/* VM_* flags above */
	struct page		**pages;	/* backing pages, if any */
	unsigned int		nr_pages;	/* number of entries in @pages */
	phys_addr_t		phys_addr;	/* backing physical address (ioremap) */
	const void		*caller;	/* allocation call site */
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct llist_node purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;		/* associated vm_struct, if any */
	struct rcu_head rcu_head;	/* for deferred freeing */
};

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

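/*
 * Example (illustrative sketch; "pages", "nr" and "data" are hypothetical):
 * a transient mapping of a page array with vm_map_ram(), torn down with
 * vm_unmap_ram().  The count passed to vm_unmap_ram() must match the one
 * used at map time.
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, data, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 */
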
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
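
/*
 * Example (illustrative sketch): __vmalloc() is the gfp/prot-taking variant
 * behind the helpers above; vzalloc(size), for instance, is roughly
 *
 *	__vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
 *
 * __vmalloc_node_range() additionally confines the allocation to a
 * [start, end) range of the vmalloc address space.
 */
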
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
#else
extern void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);

/*
 * We really want this inlined for caller tracking: the high-level vmalloc
 * APIs use this helper, and inlining makes __builtin_return_address(0)
 * report their callers rather than this wrapper.
 */
static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
#endif

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
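
/*
 * Example (illustrative sketch; "buf" and the size are hypothetical): the
 * usual allocate/use/free pairing for a large, virtually contiguous buffer.
 *
 *	void *buf = vzalloc(16 * PAGE_SIZE);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * vfree() may sleep when called from process context; vfree_atomic() defers
 * the actual free to a workqueue for callers that must not sleep.
 */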

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
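
/*
 * Example (illustrative sketch; "pages" and "nr" are hypothetical): a
 * longer-lived mapping of an existing page array.  Unlike vm_map_ram(),
 * vmap() takes the VM_* flags above, and vunmap() needs no page count.
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */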

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
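
/*
 * Example (illustrative sketch; the fops wiring and "foo_buf" are
 * hypothetical): exposing a vmalloc_user() buffer to user space from a
 * driver's mmap handler.  The buffer must carry VM_USERMAP (as set up by
 * vmalloc_user() or vmalloc_32_user()) or remap_vmalloc_range() will fail.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */
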
void vmalloc_sync_all(void);

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page **pages);
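
/*
 * Example (illustrative sketch; "pages" and "nr" are hypothetical): the
 * low-level two-step equivalent of vmap(): reserve a virtual range, then
 * map the pages into it.
 *
 *	struct vm_struct *area = get_vm_area(nr * PAGE_SIZE, VM_MAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	if (map_vm_area(area, PAGE_KERNEL, pages)) {
 *		free_vm_area(area);
 *		return -ENOMEM;
 *	}
 *	... use area->addr ...
 */
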
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);
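
/*
 * Example (illustrative sketch; "N" is hypothetical): alloc_vm_area()
 * reserves kernel address space and, when @ptes is non-NULL, returns the
 * PTE pointers so the caller (historically Xen grant-table code) can
 * install the mappings itself.
 *
 *	pte_t *ptes[N];
 *	struct vm_struct *area = alloc_vm_area(N * PAGE_SIZE, ptes);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... install entries through ptes[] ...
 *	free_vm_area(area);
 */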

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use!
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
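
/*
 * Example (illustrative sketch; foo_purge_cb and foo_nb are hypothetical):
 * getting a callback when lazily freed vmap areas are purged, e.g. to drop
 * caches that pin vmalloc space.
 *
 *	static int foo_purge_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_purge_cb,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_nb);
 *	...
 *	unregister_vmap_purge_notifier(&foo_nb);
 */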

#endif /* _LINUX_VMALLOC_H */