xref: /linux-6.15/include/linux/highmem.h (revision 97dfbbd1)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds #ifndef _LINUX_HIGHMEM_H
31da177e4SLinus Torvalds #define _LINUX_HIGHMEM_H
41da177e4SLinus Torvalds 
51da177e4SLinus Torvalds #include <linux/fs.h>
6597781f3SCesar Eduardo Barros #include <linux/kernel.h>
7187f1882SPaul Gortmaker #include <linux/bug.h>
8522a0032SMatthew Wilcox (Oracle) #include <linux/cacheflush.h>
9b073d7f8SAlexander Potapenko #include <linux/kmsan.h>
101da177e4SLinus Torvalds #include <linux/mm.h>
11ad76fb6bSPeter Zijlstra #include <linux/uaccess.h>
1243b3a0c7SCatalin Marinas #include <linux/hardirq.h>
131da177e4SLinus Torvalds 
1413f876baSThomas Gleixner #include "highmem-internal.h"
1513f876baSThomas Gleixner 
1613f876baSThomas Gleixner /**
1713f876baSThomas Gleixner  * kmap - Map a page for long term usage
1813f876baSThomas Gleixner  * @page:	Pointer to the page to be mapped
1913f876baSThomas Gleixner  *
2013f876baSThomas Gleixner  * Returns: The virtual address of the mapping
2113f876baSThomas Gleixner  *
2213f876baSThomas Gleixner  * Can only be invoked from preemptible task context because on 32bit
2313f876baSThomas Gleixner  * systems with CONFIG_HIGHMEM enabled this function might sleep.
2413f876baSThomas Gleixner  *
2513f876baSThomas Gleixner  * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
2613f876baSThomas Gleixner  * this returns the virtual address of the direct kernel mapping.
2713f876baSThomas Gleixner  *
2813f876baSThomas Gleixner  * The returned virtual address is globally visible and valid up to the
2913f876baSThomas Gleixner  * point where it is unmapped via kunmap(). The pointer can be handed to
3013f876baSThomas Gleixner  * other contexts.
3113f876baSThomas Gleixner  *
3213f876baSThomas Gleixner  * For highmem pages on 32bit systems this can be slow as the mapping space
3313f876baSThomas Gleixner  * is limited and protected by a global lock. In case that there is no
3413f876baSThomas Gleixner  * mapping slot available the function blocks until a slot is released via
3513f876baSThomas Gleixner  * kunmap().
3613f876baSThomas Gleixner  */
3713f876baSThomas Gleixner static inline void *kmap(struct page *page);
3813f876baSThomas Gleixner 
3913f876baSThomas Gleixner /**
4013f876baSThomas Gleixner  * kunmap - Unmap the virtual address mapped by kmap()
41e7392b4eSFabio M. De Francesco  * @page:	Pointer to the page which was mapped by kmap()
4213f876baSThomas Gleixner  *
4313f876baSThomas Gleixner  * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
4413f876baSThomas Gleixner  * pages in the low memory area.
4513f876baSThomas Gleixner  */
4613f876baSThomas Gleixner static inline void kunmap(struct page *page);
4713f876baSThomas Gleixner 
4813f876baSThomas Gleixner /**
4913f876baSThomas Gleixner  * kmap_to_page - Get the page for a kmap'ed address
5013f876baSThomas Gleixner  * @addr:	The address to look up
5113f876baSThomas Gleixner  *
5213f876baSThomas Gleixner  * Returns: The page which is mapped to @addr.
5313f876baSThomas Gleixner  */
5413f876baSThomas Gleixner static inline struct page *kmap_to_page(void *addr);
5513f876baSThomas Gleixner 
5613f876baSThomas Gleixner /**
5713f876baSThomas Gleixner  * kmap_flush_unused - Flush all unused kmap mappings in order to
5813f876baSThomas Gleixner  *		       remove stray mappings
5913f876baSThomas Gleixner  */
6013f876baSThomas Gleixner static inline void kmap_flush_unused(void);
6113f876baSThomas Gleixner 
6213f876baSThomas Gleixner /**
63f3ba3c71SThomas Gleixner  * kmap_local_page - Map a page for temporary usage
6413f876baSThomas Gleixner  * @page: Pointer to the page to be mapped
6513f876baSThomas Gleixner  *
6613f876baSThomas Gleixner  * Returns: The virtual address of the mapping
6713f876baSThomas Gleixner  *
68383bbef2SFabio M. De Francesco  * Can be invoked from any context, including interrupts.
6913f876baSThomas Gleixner  *
7013f876baSThomas Gleixner  * Requires careful handling when nesting multiple mappings because the map
7113f876baSThomas Gleixner  * management is stack based. The unmap has to be in the reverse order of
7213f876baSThomas Gleixner  * the map operation:
7313f876baSThomas Gleixner  *
74f3ba3c71SThomas Gleixner  * addr1 = kmap_local_page(page1);
75f3ba3c71SThomas Gleixner  * addr2 = kmap_local_page(page2);
7613f876baSThomas Gleixner  * ...
77f3ba3c71SThomas Gleixner  * kunmap_local(addr2);
78f3ba3c71SThomas Gleixner  * kunmap_local(addr1);
7913f876baSThomas Gleixner  *
8013f876baSThomas Gleixner  * Unmapping addr1 before addr2 is invalid and causes malfunction.
8113f876baSThomas Gleixner  *
8213f876baSThomas Gleixner  * Contrary to kmap() mappings the mapping is only valid in the context of
8313f876baSThomas Gleixner  * the caller and cannot be handed to other contexts.
8413f876baSThomas Gleixner  *
8513f876baSThomas Gleixner  * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
8613f876baSThomas Gleixner  * virtual address of the direct mapping. Only real highmem pages are
8713f876baSThomas Gleixner  * temporarily mapped.
8813f876baSThomas Gleixner  *
891f8549fcSFabio M. De Francesco  * While kmap_local_page() is significantly faster than kmap() for the highmem
901f8549fcSFabio M. De Francesco  * case it comes with restrictions about the pointer validity.
91f3ba3c71SThomas Gleixner  *
92f3ba3c71SThomas Gleixner  * On HIGHMEM enabled systems mapping a highmem page has the side effect of
93f3ba3c71SThomas Gleixner  * disabling migration in order to keep the virtual address stable across
94f3ba3c71SThomas Gleixner  * preemption. No caller of kmap_local_page() can rely on this side effect.
95f3ba3c71SThomas Gleixner  */
96f3ba3c71SThomas Gleixner static inline void *kmap_local_page(struct page *page);
97f3ba3c71SThomas Gleixner 
98f3ba3c71SThomas Gleixner /**
9953c36de0SMatthew Wilcox (Oracle)  * kmap_local_folio - Map a page in this folio for temporary usage
10053c36de0SMatthew Wilcox (Oracle)  * @folio: The folio containing the page.
10153c36de0SMatthew Wilcox (Oracle)  * @offset: The byte offset within the folio which identifies the page.
10253c36de0SMatthew Wilcox (Oracle)  *
10353c36de0SMatthew Wilcox (Oracle)  * Requires careful handling when nesting multiple mappings because the map
10453c36de0SMatthew Wilcox (Oracle)  * management is stack based. The unmap has to be in the reverse order of
10553c36de0SMatthew Wilcox (Oracle)  * the map operation::
10653c36de0SMatthew Wilcox (Oracle)  *
10753c36de0SMatthew Wilcox (Oracle)  *   addr1 = kmap_local_folio(folio1, offset1);
10853c36de0SMatthew Wilcox (Oracle)  *   addr2 = kmap_local_folio(folio2, offset2);
10953c36de0SMatthew Wilcox (Oracle)  *   ...
11053c36de0SMatthew Wilcox (Oracle)  *   kunmap_local(addr2);
11153c36de0SMatthew Wilcox (Oracle)  *   kunmap_local(addr1);
11253c36de0SMatthew Wilcox (Oracle)  *
11353c36de0SMatthew Wilcox (Oracle)  * Unmapping addr1 before addr2 is invalid and causes malfunction.
11453c36de0SMatthew Wilcox (Oracle)  *
11553c36de0SMatthew Wilcox (Oracle)  * Contrary to kmap() mappings the mapping is only valid in the context of
11653c36de0SMatthew Wilcox (Oracle)  * the caller and cannot be handed to other contexts.
11753c36de0SMatthew Wilcox (Oracle)  *
11853c36de0SMatthew Wilcox (Oracle)  * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
11953c36de0SMatthew Wilcox (Oracle)  * virtual address of the direct mapping. Only real highmem pages are
12053c36de0SMatthew Wilcox (Oracle)  * temporarily mapped.
12153c36de0SMatthew Wilcox (Oracle)  *
1229eefefd8SFabio M. De Francesco  * While it is significantly faster than kmap() for the highmem case it
1239eefefd8SFabio M. De Francesco  * comes with restrictions about the pointer validity.
12453c36de0SMatthew Wilcox (Oracle)  *
12553c36de0SMatthew Wilcox (Oracle)  * On HIGHMEM enabled systems mapping a highmem page has the side effect of
12653c36de0SMatthew Wilcox (Oracle)  * disabling migration in order to keep the virtual address stable across
12753c36de0SMatthew Wilcox (Oracle)  * preemption. No caller of kmap_local_folio() can rely on this side effect.
12853c36de0SMatthew Wilcox (Oracle)  *
12953c36de0SMatthew Wilcox (Oracle)  * Context: Can be invoked from any context.
13053c36de0SMatthew Wilcox (Oracle)  * Return: The virtual address of @offset.
13153c36de0SMatthew Wilcox (Oracle)  */
13253c36de0SMatthew Wilcox (Oracle) static inline void *kmap_local_folio(struct folio *folio, size_t offset);
13353c36de0SMatthew Wilcox (Oracle) 
13453c36de0SMatthew Wilcox (Oracle) /**
135f3ba3c71SThomas Gleixner  * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
136f3ba3c71SThomas Gleixner  * @page:	Pointer to the page to be mapped
137f3ba3c71SThomas Gleixner  *
138f3ba3c71SThomas Gleixner  * Returns: The virtual address of the mapping
139f3ba3c71SThomas Gleixner  *
140e7392b4eSFabio M. De Francesco  * In fact a wrapper around kmap_local_page() which also disables pagefaults
141e7392b4eSFabio M. De Francesco  * and, depending on PREEMPT_RT configuration, also CPU migration and
142e7392b4eSFabio M. De Francesco  * preemption. Therefore users should not count on the latter two side effects.
143e7392b4eSFabio M. De Francesco  *
144e7392b4eSFabio M. De Francesco  * Mappings should always be released by kunmap_atomic().
145f3ba3c71SThomas Gleixner  *
146f3ba3c71SThomas Gleixner  * Do not use in new code. Use kmap_local_page() instead.
14785a85e76SFabio M. De Francesco  *
14885a85e76SFabio M. De Francesco  * It is used in atomic context when code wants to access the contents of a
14985a85e76SFabio M. De Francesco  * page that might be allocated from high memory (see __GFP_HIGHMEM), for
15085a85e76SFabio M. De Francesco  * example a page in the pagecache.  The API has two functions, and they
151cffe57beSBagas Sanjaya  * can be used in a manner similar to the following::
15285a85e76SFabio M. De Francesco  *
153cffe57beSBagas Sanjaya  *   // Find the page of interest.
15485a85e76SFabio M. De Francesco  *   struct page *page = find_get_page(mapping, offset);
15585a85e76SFabio M. De Francesco  *
156cffe57beSBagas Sanjaya  *   // Gain access to the contents of that page.
15785a85e76SFabio M. De Francesco  *   void *vaddr = kmap_atomic(page);
15885a85e76SFabio M. De Francesco  *
159cffe57beSBagas Sanjaya  *   // Do something to the contents of that page.
16085a85e76SFabio M. De Francesco  *   memset(vaddr, 0, PAGE_SIZE);
16185a85e76SFabio M. De Francesco  *
162cffe57beSBagas Sanjaya  *   // Unmap that page.
16385a85e76SFabio M. De Francesco  *   kunmap_atomic(vaddr);
16485a85e76SFabio M. De Francesco  *
16585a85e76SFabio M. De Francesco  * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
16685a85e76SFabio M. De Francesco  * call, not the argument.
16785a85e76SFabio M. De Francesco  *
16885a85e76SFabio M. De Francesco  * If you need to map two pages because you want to copy from one page to
16985a85e76SFabio M. De Francesco  * another you need to keep the kmap_atomic calls strictly nested, like:
17085a85e76SFabio M. De Francesco  *
17185a85e76SFabio M. De Francesco  * vaddr1 = kmap_atomic(page1);
17285a85e76SFabio M. De Francesco  * vaddr2 = kmap_atomic(page2);
17385a85e76SFabio M. De Francesco  *
17485a85e76SFabio M. De Francesco  * memcpy(vaddr1, vaddr2, PAGE_SIZE);
17585a85e76SFabio M. De Francesco  *
17685a85e76SFabio M. De Francesco  * kunmap_atomic(vaddr2);
17785a85e76SFabio M. De Francesco  * kunmap_atomic(vaddr1);
17813f876baSThomas Gleixner  */
17913f876baSThomas Gleixner static inline void *kmap_atomic(struct page *page);
18013f876baSThomas Gleixner 
18113f876baSThomas Gleixner /* Highmem related interfaces for management code */
18290b8fab5SDavid Hildenbrand static inline unsigned long nr_free_highpages(void);
18313f876baSThomas Gleixner static inline unsigned long totalhigh_pages(void);
18413f876baSThomas Gleixner 
18503beb076SJames Bottomley #ifndef ARCH_HAS_FLUSH_ANON_PAGE
flush_anon_page(struct vm_area_struct * vma,struct page * page,unsigned long vmaddr)186a6f36be3SRussell King static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
18703beb076SJames Bottomley {
18803beb076SJames Bottomley }
18903beb076SJames Bottomley #endif
19003beb076SJames Bottomley 
191f358afc5SChristoph Hellwig #ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
flush_kernel_vmap_range(void * vaddr,int size)1929df5f741SJames Bottomley static inline void flush_kernel_vmap_range(void *vaddr, int size)
1939df5f741SJames Bottomley {
1949df5f741SJames Bottomley }
invalidate_kernel_vmap_range(void * vaddr,int size)1959df5f741SJames Bottomley static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
1969df5f741SJames Bottomley {
1979df5f741SJames Bottomley }
1985a3a5a98SJames Bottomley #endif
1995a3a5a98SJames Bottomley 
2001da177e4SLinus Torvalds /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
201487ff320SRussell King #ifndef clear_user_highpage
clear_user_highpage(struct page * page,unsigned long vaddr)2021da177e4SLinus Torvalds static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
2031da177e4SLinus Torvalds {
204d2c20e51SIra Weiny 	void *addr = kmap_local_page(page);
2051da177e4SLinus Torvalds 	clear_user_page(addr, vaddr, page);
206d2c20e51SIra Weiny 	kunmap_local(addr);
2071da177e4SLinus Torvalds }
208487ff320SRussell King #endif
2091da177e4SLinus Torvalds 
2106bc56a4dSMatthew Wilcox (Oracle) #ifndef vma_alloc_zeroed_movable_folio
211769848c0SMel Gorman /**
2126bc56a4dSMatthew Wilcox (Oracle)  * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
2136bc56a4dSMatthew Wilcox (Oracle)  * @vma: The VMA the page is to be allocated for.
2146bc56a4dSMatthew Wilcox (Oracle)  * @vaddr: The virtual address the page will be inserted into.
215769848c0SMel Gorman  *
2166bc56a4dSMatthew Wilcox (Oracle)  * This function will allocate a page suitable for inserting into this
2176bc56a4dSMatthew Wilcox (Oracle)  * VMA at this virtual address.  It may be allocated from highmem or
2186bc56a4dSMatthew Wilcox (Oracle)  * the movable zone.  An architecture may provide its own implementation.
219e7392b4eSFabio M. De Francesco  *
2206bc56a4dSMatthew Wilcox (Oracle)  * Return: A folio containing one allocated and zeroed page or NULL if
2216bc56a4dSMatthew Wilcox (Oracle)  * we are out of memory.
222769848c0SMel Gorman  */
2236bc56a4dSMatthew Wilcox (Oracle) static inline
vma_alloc_zeroed_movable_folio(struct vm_area_struct * vma,unsigned long vaddr)2246bc56a4dSMatthew Wilcox (Oracle) struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
225769848c0SMel Gorman 				   unsigned long vaddr)
226769848c0SMel Gorman {
227c51a4f11SZi Yan 	struct folio *folio;
228c51a4f11SZi Yan 
229c51a4f11SZi Yan 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
230c51a4f11SZi Yan 	if (folio && user_alloc_needs_zeroing())
231c51a4f11SZi Yan 		clear_user_highpage(&folio->page, vaddr);
232c51a4f11SZi Yan 
233c51a4f11SZi Yan 	return folio;
234769848c0SMel Gorman }
23592638b4eSPeter Collingbourne #endif
236769848c0SMel Gorman 
clear_highpage(struct page * page)2371da177e4SLinus Torvalds static inline void clear_highpage(struct page *page)
2381da177e4SLinus Torvalds {
239d2c20e51SIra Weiny 	void *kaddr = kmap_local_page(page);
2401da177e4SLinus Torvalds 	clear_page(kaddr);
241d2c20e51SIra Weiny 	kunmap_local(kaddr);
2421da177e4SLinus Torvalds }
2431da177e4SLinus Torvalds 
clear_highpage_kasan_tagged(struct page * page)244d9da8f6cSAndrey Konovalov static inline void clear_highpage_kasan_tagged(struct page *page)
245d9da8f6cSAndrey Konovalov {
24616d91fafSPeter Collingbourne 	void *kaddr = kmap_local_page(page);
247d9da8f6cSAndrey Konovalov 
24816d91fafSPeter Collingbourne 	clear_page(kasan_reset_tag(kaddr));
24916d91fafSPeter Collingbourne 	kunmap_local(kaddr);
250d9da8f6cSAndrey Konovalov }
251d9da8f6cSAndrey Konovalov 
252013bb59dSPeter Collingbourne #ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
253013bb59dSPeter Collingbourne 
tag_clear_highpage(struct page * page)254013bb59dSPeter Collingbourne static inline void tag_clear_highpage(struct page *page)
255013bb59dSPeter Collingbourne {
256013bb59dSPeter Collingbourne }
257013bb59dSPeter Collingbourne 
258013bb59dSPeter Collingbourne #endif
259013bb59dSPeter Collingbourne 
2600060ef3bSMatthew Wilcox (Oracle) /*
2610060ef3bSMatthew Wilcox (Oracle)  * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
2620060ef3bSMatthew Wilcox (Oracle)  * If we pass in a head page, we can zero up to the size of the compound page.
2630060ef3bSMatthew Wilcox (Oracle)  */
264c0357139SMatthew Wilcox (Oracle) #ifdef CONFIG_HIGHMEM
2650060ef3bSMatthew Wilcox (Oracle) void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
2660060ef3bSMatthew Wilcox (Oracle) 		unsigned start2, unsigned end2);
267c0357139SMatthew Wilcox (Oracle) #else
zero_user_segments(struct page * page,unsigned start1,unsigned end1,unsigned start2,unsigned end2)268eebd2aa3SChristoph Lameter static inline void zero_user_segments(struct page *page,
269eebd2aa3SChristoph Lameter 		unsigned start1, unsigned end1,
270eebd2aa3SChristoph Lameter 		unsigned start2, unsigned end2)
271eebd2aa3SChristoph Lameter {
272d2c20e51SIra Weiny 	void *kaddr = kmap_local_page(page);
2730060ef3bSMatthew Wilcox (Oracle) 	unsigned int i;
274eebd2aa3SChristoph Lameter 
2750060ef3bSMatthew Wilcox (Oracle) 	BUG_ON(end1 > page_size(page) || end2 > page_size(page));
276eebd2aa3SChristoph Lameter 
277eebd2aa3SChristoph Lameter 	if (end1 > start1)
278eebd2aa3SChristoph Lameter 		memset(kaddr + start1, 0, end1 - start1);
279eebd2aa3SChristoph Lameter 
280eebd2aa3SChristoph Lameter 	if (end2 > start2)
281eebd2aa3SChristoph Lameter 		memset(kaddr + start2, 0, end2 - start2);
282eebd2aa3SChristoph Lameter 
283d2c20e51SIra Weiny 	kunmap_local(kaddr);
2840060ef3bSMatthew Wilcox (Oracle) 	for (i = 0; i < compound_nr(page); i++)
2850060ef3bSMatthew Wilcox (Oracle) 		flush_dcache_page(page + i);
286eebd2aa3SChristoph Lameter }
287c0357139SMatthew Wilcox (Oracle) #endif
288eebd2aa3SChristoph Lameter 
zero_user_segment(struct page * page,unsigned start,unsigned end)289eebd2aa3SChristoph Lameter static inline void zero_user_segment(struct page *page,
290eebd2aa3SChristoph Lameter 	unsigned start, unsigned end)
291eebd2aa3SChristoph Lameter {
292eebd2aa3SChristoph Lameter 	zero_user_segments(page, start, end, 0, 0);
293eebd2aa3SChristoph Lameter }
294eebd2aa3SChristoph Lameter 
zero_user(struct page * page,unsigned start,unsigned size)295eebd2aa3SChristoph Lameter static inline void zero_user(struct page *page,
296eebd2aa3SChristoph Lameter 	unsigned start, unsigned size)
297eebd2aa3SChristoph Lameter {
298eebd2aa3SChristoph Lameter 	zero_user_segments(page, start, start + size, 0, 0);
299eebd2aa3SChristoph Lameter }
30001f2705dSNate Diller 
30177fff4aeSAtsushi Nemoto #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
30277fff4aeSAtsushi Nemoto 
copy_user_highpage(struct page * to,struct page * from,unsigned long vaddr,struct vm_area_struct * vma)3039de455b2SAtsushi Nemoto static inline void copy_user_highpage(struct page *to, struct page *from,
3049de455b2SAtsushi Nemoto 	unsigned long vaddr, struct vm_area_struct *vma)
3051da177e4SLinus Torvalds {
3061da177e4SLinus Torvalds 	char *vfrom, *vto;
3071da177e4SLinus Torvalds 
308d2c20e51SIra Weiny 	vfrom = kmap_local_page(from);
309d2c20e51SIra Weiny 	vto = kmap_local_page(to);
3101da177e4SLinus Torvalds 	copy_user_page(vto, vfrom, vaddr, to);
311b073d7f8SAlexander Potapenko 	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
312d2c20e51SIra Weiny 	kunmap_local(vto);
313d2c20e51SIra Weiny 	kunmap_local(vfrom);
3141da177e4SLinus Torvalds }
3151da177e4SLinus Torvalds 
31677fff4aeSAtsushi Nemoto #endif
31777fff4aeSAtsushi Nemoto 
3186efc7afbSJiaqi Yan #ifndef __HAVE_ARCH_COPY_HIGHPAGE
3196efc7afbSJiaqi Yan 
copy_highpage(struct page * to,struct page * from)3206efc7afbSJiaqi Yan static inline void copy_highpage(struct page *to, struct page *from)
3216efc7afbSJiaqi Yan {
3226efc7afbSJiaqi Yan 	char *vfrom, *vto;
3236efc7afbSJiaqi Yan 
3246efc7afbSJiaqi Yan 	vfrom = kmap_local_page(from);
3256efc7afbSJiaqi Yan 	vto = kmap_local_page(to);
3266efc7afbSJiaqi Yan 	copy_page(vto, vfrom);
3276efc7afbSJiaqi Yan 	kmsan_copy_page_meta(to, from);
3286efc7afbSJiaqi Yan 	kunmap_local(vto);
3296efc7afbSJiaqi Yan 	kunmap_local(vfrom);
3306efc7afbSJiaqi Yan }
3316efc7afbSJiaqi Yan 
3326efc7afbSJiaqi Yan #endif
3336efc7afbSJiaqi Yan 
334a873dfe1STony Luck #ifdef copy_mc_to_kernel
3356efc7afbSJiaqi Yan /*
3366efc7afbSJiaqi Yan  * If architecture supports machine check exception handling, define the
3376efc7afbSJiaqi Yan  * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
3386efc7afbSJiaqi Yan  * page with #MC in source page (@from) handled, and return the number
3396efc7afbSJiaqi Yan  * of bytes not copied if there was a #MC, otherwise 0 for success.
3406efc7afbSJiaqi Yan  */
copy_mc_user_highpage(struct page * to,struct page * from,unsigned long vaddr,struct vm_area_struct * vma)341a873dfe1STony Luck static inline int copy_mc_user_highpage(struct page *to, struct page *from,
342a873dfe1STony Luck 					unsigned long vaddr, struct vm_area_struct *vma)
343a873dfe1STony Luck {
344a873dfe1STony Luck 	unsigned long ret;
345a873dfe1STony Luck 	char *vfrom, *vto;
346a873dfe1STony Luck 
347a873dfe1STony Luck 	vfrom = kmap_local_page(from);
348a873dfe1STony Luck 	vto = kmap_local_page(to);
349a873dfe1STony Luck 	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
350a873dfe1STony Luck 	if (!ret)
351a873dfe1STony Luck 		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
352a873dfe1STony Luck 	kunmap_local(vto);
353a873dfe1STony Luck 	kunmap_local(vfrom);
354a873dfe1STony Luck 
35528bdacbcSKefeng Wang 	if (ret)
35628bdacbcSKefeng Wang 		memory_failure_queue(page_to_pfn(from), 0);
35728bdacbcSKefeng Wang 
358a873dfe1STony Luck 	return ret;
359a873dfe1STony Luck }
3606efc7afbSJiaqi Yan 
copy_mc_highpage(struct page * to,struct page * from)3616efc7afbSJiaqi Yan static inline int copy_mc_highpage(struct page *to, struct page *from)
3626efc7afbSJiaqi Yan {
3636efc7afbSJiaqi Yan 	unsigned long ret;
3646efc7afbSJiaqi Yan 	char *vfrom, *vto;
3656efc7afbSJiaqi Yan 
3666efc7afbSJiaqi Yan 	vfrom = kmap_local_page(from);
3676efc7afbSJiaqi Yan 	vto = kmap_local_page(to);
3686efc7afbSJiaqi Yan 	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
3696efc7afbSJiaqi Yan 	if (!ret)
3706efc7afbSJiaqi Yan 		kmsan_copy_page_meta(to, from);
3716efc7afbSJiaqi Yan 	kunmap_local(vto);
3726efc7afbSJiaqi Yan 	kunmap_local(vfrom);
3736efc7afbSJiaqi Yan 
37428bdacbcSKefeng Wang 	if (ret)
37528bdacbcSKefeng Wang 		memory_failure_queue(page_to_pfn(from), 0);
37628bdacbcSKefeng Wang 
3776efc7afbSJiaqi Yan 	return ret;
3786efc7afbSJiaqi Yan }
379a873dfe1STony Luck #else
copy_mc_user_highpage(struct page * to,struct page * from,unsigned long vaddr,struct vm_area_struct * vma)380a873dfe1STony Luck static inline int copy_mc_user_highpage(struct page *to, struct page *from,
381a873dfe1STony Luck 					unsigned long vaddr, struct vm_area_struct *vma)
382a873dfe1STony Luck {
383a873dfe1STony Luck 	copy_user_highpage(to, from, vaddr, vma);
384a873dfe1STony Luck 	return 0;
385a873dfe1STony Luck }
386a873dfe1STony Luck 
copy_mc_highpage(struct page * to,struct page * from)3876efc7afbSJiaqi Yan static inline int copy_mc_highpage(struct page *to, struct page *from)
3881da177e4SLinus Torvalds {
3896efc7afbSJiaqi Yan 	copy_highpage(to, from);
3906efc7afbSJiaqi Yan 	return 0;
3911da177e4SLinus Torvalds }
392a4602b62SKhalid Aziz #endif
393a4602b62SKhalid Aziz 
memcpy_page(struct page * dst_page,size_t dst_off,struct page * src_page,size_t src_off,size_t len)3946a0996dbSIra Weiny static inline void memcpy_page(struct page *dst_page, size_t dst_off,
3956a0996dbSIra Weiny 			       struct page *src_page, size_t src_off,
3966a0996dbSIra Weiny 			       size_t len)
3976a0996dbSIra Weiny {
3986a0996dbSIra Weiny 	char *dst = kmap_local_page(dst_page);
3996a0996dbSIra Weiny 	char *src = kmap_local_page(src_page);
4006a0996dbSIra Weiny 
401ca18f6eaSIra Weiny 	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
4026a0996dbSIra Weiny 	memcpy(dst + dst_off, src + src_off, len);
4036a0996dbSIra Weiny 	kunmap_local(src);
4046a0996dbSIra Weiny 	kunmap_local(dst);
4056a0996dbSIra Weiny }
4066a0996dbSIra Weiny 
memset_page(struct page * page,size_t offset,int val,size_t len)4076a0996dbSIra Weiny static inline void memset_page(struct page *page, size_t offset, int val,
4086a0996dbSIra Weiny 			       size_t len)
4096a0996dbSIra Weiny {
4106a0996dbSIra Weiny 	char *addr = kmap_local_page(page);
4116a0996dbSIra Weiny 
412ca18f6eaSIra Weiny 	VM_BUG_ON(offset + len > PAGE_SIZE);
4136a0996dbSIra Weiny 	memset(addr + offset, val, len);
4146a0996dbSIra Weiny 	kunmap_local(addr);
4156a0996dbSIra Weiny }
4166a0996dbSIra Weiny 
memcpy_from_page(char * to,struct page * page,size_t offset,size_t len)417bb90d4bcSIra Weiny static inline void memcpy_from_page(char *to, struct page *page,
418bb90d4bcSIra Weiny 				    size_t offset, size_t len)
419bb90d4bcSIra Weiny {
42061b205f5SIra Weiny 	char *from = kmap_local_page(page);
421bb90d4bcSIra Weiny 
422ca18f6eaSIra Weiny 	VM_BUG_ON(offset + len > PAGE_SIZE);
423bb90d4bcSIra Weiny 	memcpy(to, from + offset, len);
42461b205f5SIra Weiny 	kunmap_local(from);
425bb90d4bcSIra Weiny }
426bb90d4bcSIra Weiny 
memcpy_to_page(struct page * page,size_t offset,const char * from,size_t len)427bb90d4bcSIra Weiny static inline void memcpy_to_page(struct page *page, size_t offset,
428bb90d4bcSIra Weiny 				  const char *from, size_t len)
429bb90d4bcSIra Weiny {
43061b205f5SIra Weiny 	char *to = kmap_local_page(page);
431bb90d4bcSIra Weiny 
432ca18f6eaSIra Weiny 	VM_BUG_ON(offset + len > PAGE_SIZE);
433bb90d4bcSIra Weiny 	memcpy(to + offset, from, len);
4348dad53a1SChristoph Hellwig 	flush_dcache_page(page);
43561b205f5SIra Weiny 	kunmap_local(to);
436bb90d4bcSIra Weiny }
437bb90d4bcSIra Weiny 
memzero_page(struct page * page,size_t offset,size_t len)43828961998SIra Weiny static inline void memzero_page(struct page *page, size_t offset, size_t len)
43928961998SIra Weiny {
440d9a42b53SChristoph Hellwig 	char *addr = kmap_local_page(page);
441f38adfefSFabio M. De Francesco 
442f38adfefSFabio M. De Francesco 	VM_BUG_ON(offset + len > PAGE_SIZE);
44328961998SIra Weiny 	memset(addr + offset, 0, len);
4448dad53a1SChristoph Hellwig 	flush_dcache_page(page);
445d9a42b53SChristoph Hellwig 	kunmap_local(addr);
44628961998SIra Weiny }
44728961998SIra Weiny 
4489af47276SMatthew Wilcox (Oracle) /**
4499af47276SMatthew Wilcox (Oracle)  * memcpy_from_folio - Copy a range of bytes from a folio.
4509af47276SMatthew Wilcox (Oracle)  * @to: The memory to copy to.
4519af47276SMatthew Wilcox (Oracle)  * @folio: The folio to read from.
4529af47276SMatthew Wilcox (Oracle)  * @offset: The first byte in the folio to read.
4539af47276SMatthew Wilcox (Oracle)  * @len: The number of bytes to copy.
4549af47276SMatthew Wilcox (Oracle)  */
memcpy_from_folio(char * to,struct folio * folio,size_t offset,size_t len)455b23d03efSMatthew Wilcox (Oracle) static inline void memcpy_from_folio(char *to, struct folio *folio,
456b23d03efSMatthew Wilcox (Oracle) 		size_t offset, size_t len)
457b23d03efSMatthew Wilcox (Oracle) {
458b23d03efSMatthew Wilcox (Oracle) 	VM_BUG_ON(offset + len > folio_size(folio));
459b23d03efSMatthew Wilcox (Oracle) 
460b23d03efSMatthew Wilcox (Oracle) 	do {
461b23d03efSMatthew Wilcox (Oracle) 		const char *from = kmap_local_folio(folio, offset);
462b23d03efSMatthew Wilcox (Oracle) 		size_t chunk = len;
463b23d03efSMatthew Wilcox (Oracle) 
464*97dfbbd1SMatthew Wilcox (Oracle) 		if (folio_test_partial_kmap(folio) &&
465b23d03efSMatthew Wilcox (Oracle) 		    chunk > PAGE_SIZE - offset_in_page(offset))
466b23d03efSMatthew Wilcox (Oracle) 			chunk = PAGE_SIZE - offset_in_page(offset);
467b23d03efSMatthew Wilcox (Oracle) 		memcpy(to, from, chunk);
468b23d03efSMatthew Wilcox (Oracle) 		kunmap_local(from);
469b23d03efSMatthew Wilcox (Oracle) 
47073424d00SSu Hui 		to += chunk;
471b23d03efSMatthew Wilcox (Oracle) 		offset += chunk;
472b23d03efSMatthew Wilcox (Oracle) 		len -= chunk;
473b23d03efSMatthew Wilcox (Oracle) 	} while (len > 0);
474b23d03efSMatthew Wilcox (Oracle) }
475b23d03efSMatthew Wilcox (Oracle) 
4769af47276SMatthew Wilcox (Oracle) /**
4779af47276SMatthew Wilcox (Oracle)  * memcpy_to_folio - Copy a range of bytes to a folio.
4789af47276SMatthew Wilcox (Oracle)  * @folio: The folio to write to.
4799af47276SMatthew Wilcox (Oracle)  * @offset: The first byte in the folio to store to.
4809af47276SMatthew Wilcox (Oracle)  * @from: The memory to copy from.
4819af47276SMatthew Wilcox (Oracle)  * @len: The number of bytes to copy.
4829af47276SMatthew Wilcox (Oracle)  */
memcpy_to_folio(struct folio * folio,size_t offset,const char * from,size_t len)483b23d03efSMatthew Wilcox (Oracle) static inline void memcpy_to_folio(struct folio *folio, size_t offset,
484b23d03efSMatthew Wilcox (Oracle) 		const char *from, size_t len)
485b23d03efSMatthew Wilcox (Oracle) {
486b23d03efSMatthew Wilcox (Oracle) 	VM_BUG_ON(offset + len > folio_size(folio));
487b23d03efSMatthew Wilcox (Oracle) 
488b23d03efSMatthew Wilcox (Oracle) 	do {
489b23d03efSMatthew Wilcox (Oracle) 		char *to = kmap_local_folio(folio, offset);
490b23d03efSMatthew Wilcox (Oracle) 		size_t chunk = len;
491b23d03efSMatthew Wilcox (Oracle) 
492*97dfbbd1SMatthew Wilcox (Oracle) 		if (folio_test_partial_kmap(folio) &&
493b23d03efSMatthew Wilcox (Oracle) 		    chunk > PAGE_SIZE - offset_in_page(offset))
494b23d03efSMatthew Wilcox (Oracle) 			chunk = PAGE_SIZE - offset_in_page(offset);
495b23d03efSMatthew Wilcox (Oracle) 		memcpy(to, from, chunk);
496b23d03efSMatthew Wilcox (Oracle) 		kunmap_local(to);
497b23d03efSMatthew Wilcox (Oracle) 
498b23d03efSMatthew Wilcox (Oracle) 		from += chunk;
499b23d03efSMatthew Wilcox (Oracle) 		offset += chunk;
500b23d03efSMatthew Wilcox (Oracle) 		len -= chunk;
501b23d03efSMatthew Wilcox (Oracle) 	} while (len > 0);
502b23d03efSMatthew Wilcox (Oracle) 
503b23d03efSMatthew Wilcox (Oracle) 	flush_dcache_folio(folio);
504b23d03efSMatthew Wilcox (Oracle) }
505b23d03efSMatthew Wilcox (Oracle) 
506c0357139SMatthew Wilcox (Oracle) /**
507a4fc4a0cSMatthew Wilcox (Oracle)  * folio_zero_tail - Zero the tail of a folio.
508a4fc4a0cSMatthew Wilcox (Oracle)  * @folio: The folio to zero.
509a4fc4a0cSMatthew Wilcox (Oracle)  * @offset: The byte offset in the folio to start zeroing at.
510a4fc4a0cSMatthew Wilcox (Oracle)  * @kaddr: The address the folio is currently mapped to.
511a4fc4a0cSMatthew Wilcox (Oracle)  *
512a4fc4a0cSMatthew Wilcox (Oracle)  * If you have already used kmap_local_folio() to map a folio, written
513a4fc4a0cSMatthew Wilcox (Oracle)  * some data to it and now need to zero the end of the folio (and flush
514a4fc4a0cSMatthew Wilcox (Oracle)  * the dcache), you can use this function.  If you do not have the
515a4fc4a0cSMatthew Wilcox (Oracle)  * folio kmapped (eg the folio has been partially populated by DMA),
516a4fc4a0cSMatthew Wilcox (Oracle)  * use folio_zero_range() or folio_zero_segment() instead.
517a4fc4a0cSMatthew Wilcox (Oracle)  *
518a4fc4a0cSMatthew Wilcox (Oracle)  * Return: An address which can be passed to kunmap_local().
519a4fc4a0cSMatthew Wilcox (Oracle)  */
folio_zero_tail(struct folio * folio,size_t offset,void * kaddr)520a4fc4a0cSMatthew Wilcox (Oracle) static inline __must_check void *folio_zero_tail(struct folio *folio,
521a4fc4a0cSMatthew Wilcox (Oracle) 		size_t offset, void *kaddr)
522a4fc4a0cSMatthew Wilcox (Oracle) {
523a4fc4a0cSMatthew Wilcox (Oracle) 	size_t len = folio_size(folio) - offset;
524a4fc4a0cSMatthew Wilcox (Oracle) 
525*97dfbbd1SMatthew Wilcox (Oracle) 	if (folio_test_partial_kmap(folio)) {
526a4fc4a0cSMatthew Wilcox (Oracle) 		size_t max = PAGE_SIZE - offset_in_page(offset);
527a4fc4a0cSMatthew Wilcox (Oracle) 
528a4fc4a0cSMatthew Wilcox (Oracle) 		while (len > max) {
529a4fc4a0cSMatthew Wilcox (Oracle) 			memset(kaddr, 0, max);
530a4fc4a0cSMatthew Wilcox (Oracle) 			kunmap_local(kaddr);
531a4fc4a0cSMatthew Wilcox (Oracle) 			len -= max;
532a4fc4a0cSMatthew Wilcox (Oracle) 			offset += max;
533a4fc4a0cSMatthew Wilcox (Oracle) 			max = PAGE_SIZE;
534a4fc4a0cSMatthew Wilcox (Oracle) 			kaddr = kmap_local_folio(folio, offset);
535a4fc4a0cSMatthew Wilcox (Oracle) 		}
536a4fc4a0cSMatthew Wilcox (Oracle) 	}
537a4fc4a0cSMatthew Wilcox (Oracle) 
538a4fc4a0cSMatthew Wilcox (Oracle) 	memset(kaddr, 0, len);
539a4fc4a0cSMatthew Wilcox (Oracle) 	flush_dcache_folio(folio);
540a4fc4a0cSMatthew Wilcox (Oracle) 
541a4fc4a0cSMatthew Wilcox (Oracle) 	return kaddr;
542a4fc4a0cSMatthew Wilcox (Oracle) }
543a4fc4a0cSMatthew Wilcox (Oracle) 
544a4fc4a0cSMatthew Wilcox (Oracle) /**
5456eaa266bSMatthew Wilcox (Oracle)  * folio_fill_tail - Copy some data to a folio and pad with zeroes.
5466eaa266bSMatthew Wilcox (Oracle)  * @folio: The destination folio.
5476eaa266bSMatthew Wilcox (Oracle)  * @offset: The offset into @folio at which to start copying.
5486eaa266bSMatthew Wilcox (Oracle)  * @from: The data to copy.
5496eaa266bSMatthew Wilcox (Oracle)  * @len: How many bytes of data to copy.
5506eaa266bSMatthew Wilcox (Oracle)  *
5516eaa266bSMatthew Wilcox (Oracle)  * This function is most useful for filesystems which support inline data.
5526eaa266bSMatthew Wilcox (Oracle)  * When they want to copy data from the inode into the page cache, this
5536eaa266bSMatthew Wilcox (Oracle)  * function does everything for them.  It supports large folios even on
5546eaa266bSMatthew Wilcox (Oracle)  * HIGHMEM configurations.
5556eaa266bSMatthew Wilcox (Oracle)  */
folio_fill_tail(struct folio * folio,size_t offset,const char * from,size_t len)5566eaa266bSMatthew Wilcox (Oracle) static inline void folio_fill_tail(struct folio *folio, size_t offset,
5576eaa266bSMatthew Wilcox (Oracle) 		const char *from, size_t len)
5586eaa266bSMatthew Wilcox (Oracle) {
5596eaa266bSMatthew Wilcox (Oracle) 	char *to = kmap_local_folio(folio, offset);
5606eaa266bSMatthew Wilcox (Oracle) 
5616eaa266bSMatthew Wilcox (Oracle) 	VM_BUG_ON(offset + len > folio_size(folio));
5626eaa266bSMatthew Wilcox (Oracle) 
563*97dfbbd1SMatthew Wilcox (Oracle) 	if (folio_test_partial_kmap(folio)) {
5646eaa266bSMatthew Wilcox (Oracle) 		size_t max = PAGE_SIZE - offset_in_page(offset);
5656eaa266bSMatthew Wilcox (Oracle) 
5666eaa266bSMatthew Wilcox (Oracle) 		while (len > max) {
5676eaa266bSMatthew Wilcox (Oracle) 			memcpy(to, from, max);
5686eaa266bSMatthew Wilcox (Oracle) 			kunmap_local(to);
5696eaa266bSMatthew Wilcox (Oracle) 			len -= max;
5706eaa266bSMatthew Wilcox (Oracle) 			from += max;
5716eaa266bSMatthew Wilcox (Oracle) 			offset += max;
5726eaa266bSMatthew Wilcox (Oracle) 			max = PAGE_SIZE;
5736eaa266bSMatthew Wilcox (Oracle) 			to = kmap_local_folio(folio, offset);
5746eaa266bSMatthew Wilcox (Oracle) 		}
5756eaa266bSMatthew Wilcox (Oracle) 	}
5766eaa266bSMatthew Wilcox (Oracle) 
5776eaa266bSMatthew Wilcox (Oracle) 	memcpy(to, from, len);
5786eaa266bSMatthew Wilcox (Oracle) 	to = folio_zero_tail(folio, offset + len, to + len);
5796eaa266bSMatthew Wilcox (Oracle) 	kunmap_local(to);
5806eaa266bSMatthew Wilcox (Oracle) }
5816eaa266bSMatthew Wilcox (Oracle) 
5826eaa266bSMatthew Wilcox (Oracle) /**
58300cdf760SMatthew Wilcox (Oracle)  * memcpy_from_file_folio - Copy some bytes from a file folio.
58400cdf760SMatthew Wilcox (Oracle)  * @to: The destination buffer.
58500cdf760SMatthew Wilcox (Oracle)  * @folio: The folio to copy from.
58600cdf760SMatthew Wilcox (Oracle)  * @pos: The position in the file.
58700cdf760SMatthew Wilcox (Oracle)  * @len: The maximum number of bytes to copy.
58800cdf760SMatthew Wilcox (Oracle)  *
58900cdf760SMatthew Wilcox (Oracle)  * Copy up to @len bytes from this folio.  This may be limited by PAGE_SIZE
59000cdf760SMatthew Wilcox (Oracle)  * if the folio comes from HIGHMEM, and by the size of the folio.
59100cdf760SMatthew Wilcox (Oracle)  *
59200cdf760SMatthew Wilcox (Oracle)  * Return: The number of bytes copied from the folio.
59300cdf760SMatthew Wilcox (Oracle)  */
memcpy_from_file_folio(char * to,struct folio * folio,loff_t pos,size_t len)59400cdf760SMatthew Wilcox (Oracle) static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
59500cdf760SMatthew Wilcox (Oracle) 		loff_t pos, size_t len)
59600cdf760SMatthew Wilcox (Oracle) {
59700cdf760SMatthew Wilcox (Oracle) 	size_t offset = offset_in_folio(folio, pos);
59800cdf760SMatthew Wilcox (Oracle) 	char *from = kmap_local_folio(folio, offset);
59900cdf760SMatthew Wilcox (Oracle) 
600*97dfbbd1SMatthew Wilcox (Oracle) 	if (folio_test_partial_kmap(folio)) {
601c643e6ebSMatthew Wilcox (Oracle) 		offset = offset_in_page(offset);
60200cdf760SMatthew Wilcox (Oracle) 		len = min_t(size_t, len, PAGE_SIZE - offset);
603c643e6ebSMatthew Wilcox (Oracle) 	} else
60400cdf760SMatthew Wilcox (Oracle) 		len = min(len, folio_size(folio) - offset);
60500cdf760SMatthew Wilcox (Oracle) 
60600cdf760SMatthew Wilcox (Oracle) 	memcpy(to, from, len);
60700cdf760SMatthew Wilcox (Oracle) 	kunmap_local(from);
60800cdf760SMatthew Wilcox (Oracle) 
60900cdf760SMatthew Wilcox (Oracle) 	return len;
61000cdf760SMatthew Wilcox (Oracle) }
61100cdf760SMatthew Wilcox (Oracle) 
61200cdf760SMatthew Wilcox (Oracle) /**
613c0357139SMatthew Wilcox (Oracle)  * folio_zero_segments() - Zero two byte ranges in a folio.
614c0357139SMatthew Wilcox (Oracle)  * @folio: The folio to write to.
615c0357139SMatthew Wilcox (Oracle)  * @start1: The first byte to zero.
616c0357139SMatthew Wilcox (Oracle)  * @xend1: One more than the last byte in the first range.
617c0357139SMatthew Wilcox (Oracle)  * @start2: The first byte to zero in the second range.
618c0357139SMatthew Wilcox (Oracle)  * @xend2: One more than the last byte in the second range.
619c0357139SMatthew Wilcox (Oracle)  */
folio_zero_segments(struct folio * folio,size_t start1,size_t xend1,size_t start2,size_t xend2)620c0357139SMatthew Wilcox (Oracle) static inline void folio_zero_segments(struct folio *folio,
621c0357139SMatthew Wilcox (Oracle) 		size_t start1, size_t xend1, size_t start2, size_t xend2)
622c0357139SMatthew Wilcox (Oracle) {
623c0357139SMatthew Wilcox (Oracle) 	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
624c0357139SMatthew Wilcox (Oracle) }
625c0357139SMatthew Wilcox (Oracle) 
626c0357139SMatthew Wilcox (Oracle) /**
627c0357139SMatthew Wilcox (Oracle)  * folio_zero_segment() - Zero a byte range in a folio.
628c0357139SMatthew Wilcox (Oracle)  * @folio: The folio to write to.
629c0357139SMatthew Wilcox (Oracle)  * @start: The first byte to zero.
630c0357139SMatthew Wilcox (Oracle)  * @xend: One more than the last byte to zero.
631c0357139SMatthew Wilcox (Oracle)  */
folio_zero_segment(struct folio * folio,size_t start,size_t xend)632c0357139SMatthew Wilcox (Oracle) static inline void folio_zero_segment(struct folio *folio,
633c0357139SMatthew Wilcox (Oracle) 		size_t start, size_t xend)
634c0357139SMatthew Wilcox (Oracle) {
635c0357139SMatthew Wilcox (Oracle) 	zero_user_segments(&folio->page, start, xend, 0, 0);
636c0357139SMatthew Wilcox (Oracle) }
637c0357139SMatthew Wilcox (Oracle) 
638c0357139SMatthew Wilcox (Oracle) /**
639c0357139SMatthew Wilcox (Oracle)  * folio_zero_range() - Zero a byte range in a folio.
640c0357139SMatthew Wilcox (Oracle)  * @folio: The folio to write to.
641c0357139SMatthew Wilcox (Oracle)  * @start: The first byte to zero.
642c0357139SMatthew Wilcox (Oracle)  * @length: The number of bytes to zero.
643c0357139SMatthew Wilcox (Oracle)  */
folio_zero_range(struct folio * folio,size_t start,size_t length)644c0357139SMatthew Wilcox (Oracle) static inline void folio_zero_range(struct folio *folio,
645c0357139SMatthew Wilcox (Oracle) 		size_t start, size_t length)
646c0357139SMatthew Wilcox (Oracle) {
647c0357139SMatthew Wilcox (Oracle) 	zero_user_segments(&folio->page, start, start + length, 0, 0);
648c0357139SMatthew Wilcox (Oracle) }
649c0357139SMatthew Wilcox (Oracle) 
6503de6047fSMatthew Wilcox (Oracle) /**
6513de6047fSMatthew Wilcox (Oracle)  * folio_release_kmap - Unmap a folio and drop a refcount.
6523de6047fSMatthew Wilcox (Oracle)  * @folio: The folio to release.
6533de6047fSMatthew Wilcox (Oracle)  * @addr: The address previously returned by a call to kmap_local_folio().
6543de6047fSMatthew Wilcox (Oracle)  *
6553de6047fSMatthew Wilcox (Oracle)  * It is common, eg in directory handling to kmap a folio.  This function
6563de6047fSMatthew Wilcox (Oracle)  * unmaps the folio and drops the refcount that was being held to keep the
6573de6047fSMatthew Wilcox (Oracle)  * folio alive while we accessed it.
6583de6047fSMatthew Wilcox (Oracle)  */
folio_release_kmap(struct folio * folio,void * addr)6593de6047fSMatthew Wilcox (Oracle) static inline void folio_release_kmap(struct folio *folio, void *addr)
660849ad04cSAl Viro {
661849ad04cSAl Viro 	kunmap_local(addr);
6623de6047fSMatthew Wilcox (Oracle) 	folio_put(folio);
6633de6047fSMatthew Wilcox (Oracle) }
6643de6047fSMatthew Wilcox (Oracle) 
unmap_and_put_page(struct page * page,void * addr)6653de6047fSMatthew Wilcox (Oracle) static inline void unmap_and_put_page(struct page *page, void *addr)
6663de6047fSMatthew Wilcox (Oracle) {
6673de6047fSMatthew Wilcox (Oracle) 	folio_release_kmap(page_folio(page), addr);
668849ad04cSAl Viro }
669849ad04cSAl Viro 
6701da177e4SLinus Torvalds #endif /* _LINUX_HIGHMEM_H */
671