xref: /linux-6.15/include/linux/highmem.h (revision ea8a163e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:	Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

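/*
 * Example (an illustrative sketch, not part of this header; the helper
 * name is hypothetical): a kmap()/kunmap() pair from preemptible task
 * context. The mapping may be held across sleeps and handed to other
 * contexts, but must eventually be released via kunmap():
 *
 *	static void example_zero_first_byte(struct page *page)
 *	{
 *		char *vaddr = kmap(page);
 *
 *		vaddr[0] = 0;
 *		kunmap(page);
 *	}
 */
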
/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:	The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_page(page1);
 *   addr2 = kmap_local_page(page2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

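/*
 * Example (an illustrative sketch; the helper name is hypothetical):
 * nested kmap_local_page() mappings released in reverse order, as the
 * stack based map management above requires:
 *
 *	static void example_copy_byte(struct page *dst, struct page *src)
 *	{
 *		char *s = kmap_local_page(src);
 *		char *d = kmap_local_page(dst);
 *
 *		d[0] = s[0];
 *		kunmap_local(d);
 *		kunmap_local(s);
 *	}
 */
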
/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

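/*
 * Example (an illustrative sketch; the helper name is hypothetical and it
 * assumes the folio spans at least two pages): mapping one page of a folio
 * by byte offset. The returned address points at @offset within the folio:
 *
 *	static void example_zero_second_page(struct folio *folio)
 *	{
 *		void *addr = kmap_local_folio(folio, PAGE_SIZE);
 *
 *		memset(addr, 0, PAGE_SIZE);
 *		kunmap_local(addr);
 *	}
 */
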
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - Deprecated!
 * @addr:	Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
 * preemption.
 */

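/*
 * Example (an illustrative sketch; the helper name is hypothetical):
 * legacy kmap_atomic()/kunmap_atomic() pairing, shown only for context.
 * New code should use kmap_local_page()/kunmap_local() instead, as noted
 * above:
 *
 *	static void example_legacy_clear(struct page *page)
 *	{
 *		void *vaddr = kmap_atomic(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(vaddr);
 *	}
 */
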
/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

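/*
 * Example (an illustrative sketch of a caller; the helper name is
 * hypothetical and the error handling is simplified): obtaining a zeroed,
 * migratable page for an anonymous user mapping. A NULL return signals
 * allocation failure to the caller:
 *
 *	static struct page *example_anon_page(struct vm_area_struct *vma,
 *					      unsigned long vaddr)
 *	{
 *		return alloc_zeroed_user_highpage_movable(vma, vaddr);
 *	}
 */
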
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

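/*
 * Example (an illustrative sketch; the helper name and the scenario are
 * hypothetical): zeroing the tail of a partially valid page, e.g. after a
 * short read that filled only the first @valid bytes:
 *
 *	static void example_zero_tail(struct page *page, unsigned int valid)
 *	{
 *		zero_user_segment(page, valid, PAGE_SIZE);
 *	}
 */
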
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

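/*
 * Example (an illustrative sketch; the helper name is hypothetical and
 * @len must not exceed PAGE_SIZE per the VM_BUG_ON above): copying a byte
 * range between two pages without open-coding the kmap_local_page()/
 * kunmap_local() pairs:
 *
 *	static void example_copy_range(struct page *dst, struct page *src,
 *				       size_t len)
 *	{
 *		memcpy_page(dst, 0, src, 0, len);
 *	}
 */
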
static inline void memmove_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

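/*
 * Example (an illustrative sketch; the helper name is hypothetical and
 * @len must not exceed PAGE_SIZE): a round trip through a page buffer.
 * memcpy_to_page() flushes the dcache for the modified page, as above, so
 * readers through other mappings see the update:
 *
 *	static void example_roundtrip(struct page *page, char *buf, size_t len)
 *	{
 *		memcpy_to_page(page, 0, buf, len);
 *		memcpy_from_page(buf, page, 0, len);
 *	}
 */
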
#endif /* _LINUX_HIGHMEM_H */