113f876baSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0 */
213f876baSThomas Gleixner #ifndef _LINUX_HIGHMEM_INTERNAL_H
313f876baSThomas Gleixner #define _LINUX_HIGHMEM_INTERNAL_H
413f876baSThomas Gleixner 
513f876baSThomas Gleixner /*
613f876baSThomas Gleixner  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
713f876baSThomas Gleixner  */
813f876baSThomas Gleixner #ifdef CONFIG_KMAP_LOCAL
913f876baSThomas Gleixner void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
1013f876baSThomas Gleixner void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
1139ade048SFabio M. De Francesco void kunmap_local_indexed(const void *vaddr);
125fbda3ecSThomas Gleixner void kmap_local_fork(struct task_struct *tsk);
135fbda3ecSThomas Gleixner void __kmap_local_sched_out(void);
145fbda3ecSThomas Gleixner void __kmap_local_sched_in(void);
/*
 * Debug assertion that the current task holds no local kmaps:
 * current->kmap_ctrl.idx is non-zero while kmap_local slots are in use,
 * so a non-zero value here triggers a lockdep-style warning.
 */
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
195fbda3ecSThomas Gleixner #else
/* Stubs when CONFIG_KMAP_LOCAL is disabled: nothing to hand over on fork
 * and nothing to assert, since no per-task kmap state exists. */
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
2213f876baSThomas Gleixner #endif
2313f876baSThomas Gleixner 
2413f876baSThomas Gleixner #ifdef CONFIG_HIGHMEM
2513f876baSThomas Gleixner #include <asm/highmem.h>
2613f876baSThomas Gleixner 
2713f876baSThomas Gleixner #ifndef ARCH_HAS_KMAP_FLUSH_TLB
kmap_flush_tlb(unsigned long addr)2813f876baSThomas Gleixner static inline void kmap_flush_tlb(unsigned long addr) { }
2913f876baSThomas Gleixner #endif
3013f876baSThomas Gleixner 
3113f876baSThomas Gleixner #ifndef kmap_prot
3213f876baSThomas Gleixner #define kmap_prot PAGE_KERNEL
3313f876baSThomas Gleixner #endif
3413f876baSThomas Gleixner 
3513f876baSThomas Gleixner void *kmap_high(struct page *page);
3613f876baSThomas Gleixner void kunmap_high(struct page *page);
3713f876baSThomas Gleixner void __kmap_flush_unused(void);
3813f876baSThomas Gleixner struct page *__kmap_to_page(void *addr);
3913f876baSThomas Gleixner 
/*
 * kmap - establish a long-lived kernel mapping for @page.
 *
 * Can sleep, hence the might_sleep() annotation. Lowmem pages are
 * already addressable and are resolved via page_address(); only real
 * highmem pages go through kmap_high(). The (possibly arch-specific)
 * TLB flush is applied to whichever address was chosen.
 *
 * Returns the kernel virtual address of @page.
 */
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	addr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}
5213f876baSThomas Gleixner 
/*
 * kunmap - release a mapping established with kmap().
 *
 * May sleep. Lowmem pages were never actually mapped, so only highmem
 * pages need the kunmap_high() teardown.
 */
static inline void kunmap(struct page *page)
{
	might_sleep();
	if (PageHighMem(page))
		kunmap_high(page);
}
6013f876baSThomas Gleixner 
/* Translate a kmap'ed kernel virtual address back to its struct page;
 * thin wrapper around the out-of-line __kmap_to_page(). */
static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}
6513f876baSThomas Gleixner 
/* Flush currently unused kmap entries; delegates to __kmap_flush_unused(). */
static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}
7013f876baSThomas Gleixner 
/* Map @page for short-lived, CPU-local access using the default kernel
 * protection (kmap_prot); unmapped with kunmap_local(). */
static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}
75f3ba3c71SThomas Gleixner 
/*
 * kmap_local_folio - map the page of @folio that contains byte @offset.
 *
 * Only the single page covering @offset is mapped; the returned pointer
 * addresses the exact byte, i.e. the intra-page remainder of @offset is
 * already added. Unmap with kunmap_local().
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	size_t nr = offset / PAGE_SIZE;
	size_t off_in_page = offset % PAGE_SIZE;
	void *base = __kmap_local_page_prot(folio_page(folio, nr), kmap_prot);

	return base + off_in_page;
}
8153c36de0SMatthew Wilcox (Oracle) 
/* Like kmap_local_page() but with a caller-supplied page protection. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}
86f3ba3c71SThomas Gleixner 
/* Like kmap_local_page() but takes a page frame number instead of a
 * struct page; uses the default kernel protection. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
91f3ba3c71SThomas Gleixner 
/* Tear down a mapping created by the kmap_local_*() family; @vaddr is
 * any address within the mapped page. */
static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}
96f3ba3c71SThomas Gleixner 
/*
 * Map @page with @prot for use in atomic context. On PREEMPT_RT only
 * migration is disabled so the context stays preemptible; otherwise
 * preemption is disabled. Pagefaults are disabled in both cases.
 * __kunmap_atomic() undoes these steps in the reverse order, so the
 * ordering here (disable preemption/migration before pagefaults) must
 * be preserved.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}
10713f876baSThomas Gleixner 
/* kmap_atomic_prot() with the default kernel protection. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
11213f876baSThomas Gleixner 
/*
 * PFN-based variant of kmap_atomic(). Mirrors kmap_atomic_prot():
 * disable migration (PREEMPT_RT) or preemption, then pagefaults, then
 * establish the mapping. Must stay in sync with __kunmap_atomic().
 */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
12313f876baSThomas Gleixner 
/*
 * Counterpart to the kmap_atomic*() helpers: drop the mapping, then
 * re-enable pagefaults and finally migration (PREEMPT_RT) or
 * preemption — the exact reverse of the setup order.
 */
static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
13313f876baSThomas Gleixner 
134*90b8fab5SDavid Hildenbrand unsigned long __nr_free_highpages(void);
1357a581204SDavid Hildenbrand unsigned long __totalhigh_pages(void);
13613f876baSThomas Gleixner 
/* Number of free highmem pages; wrapper around __nr_free_highpages(). */
static inline unsigned long nr_free_highpages(void)
{
	return __nr_free_highpages();
}
14113f876baSThomas Gleixner 
/* Total number of highmem pages; wrapper around __totalhigh_pages(). */
static inline unsigned long totalhigh_pages(void)
{
	return __totalhigh_pages();
}
14613f876baSThomas Gleixner 
is_kmap_addr(const void * x)1474e140f59SMatthew Wilcox (Oracle) static inline bool is_kmap_addr(const void *x)
1484e140f59SMatthew Wilcox (Oracle) {
1494e140f59SMatthew Wilcox (Oracle) 	unsigned long addr = (unsigned long)x;
150f3837ab7SIra Weiny 
151f3837ab7SIra Weiny 	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
152f3837ab7SIra Weiny 		(addr >= __fix_to_virt(FIX_KMAP_END) &&
153f3837ab7SIra Weiny 		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
1544e140f59SMatthew Wilcox (Oracle) }
15513f876baSThomas Gleixner #else /* CONFIG_HIGHMEM */
15613f876baSThomas Gleixner 
/* Without highmem every page is in the direct map, so a plain
 * virt_to_page() translation is sufficient. */
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}
16113f876baSThomas Gleixner 
/* !HIGHMEM kmap(): every page is directly addressable. might_sleep()
 * is kept so callers honor the same may-sleep contract as the real
 * highmem implementation. */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
16713f876baSThomas Gleixner 
/* No highmem: nothing was ever mapped, so both are no-ops. */
static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }
17013f876baSThomas Gleixner 
/* !HIGHMEM kunmap(): normally a no-op, but architectures that define
 * ARCH_HAS_FLUSH_ON_KUNMAP still need a cache flush on unmap. */
static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}
17713f876baSThomas Gleixner 
/* !HIGHMEM: the page is always mapped; just return its address. */
static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}
182f3ba3c71SThomas Gleixner 
/* !HIGHMEM: the whole folio is contiguously mapped, so the byte at
 * @offset is reachable directly from the folio's base address. */
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}
18753c36de0SMatthew Wilcox (Oracle) 
/* !HIGHMEM: @prot cannot be applied to an already-mapped page and is
 * intentionally ignored. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}
192f3ba3c71SThomas Gleixner 
/* !HIGHMEM: resolve the pfn to its page and return the direct-map address. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}
197f3ba3c71SThomas Gleixner 
/* !HIGHMEM: nothing to unmap; only flush caches where the architecture
 * requires it. @addr may point anywhere within the page, hence the
 * align-down to the page boundary before flushing. */
static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}
204f3ba3c71SThomas Gleixner 
/*
 * !HIGHMEM kmap_atomic(): no mapping is needed, but the side effects
 * callers may rely on are preserved — migration (PREEMPT_RT) or
 * preemption is disabled, then pagefaults, matching the HIGHMEM
 * variant and the reverse teardown in __kunmap_atomic().
 */
static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}
21413f876baSThomas Gleixner 
/* !HIGHMEM: @prot cannot be applied to the direct map and is
 * intentionally ignored. */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}
21913f876baSThomas Gleixner 
/* !HIGHMEM: pfn-based variant; resolves the pfn and defers to kmap_atomic(). */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
22413f876baSThomas Gleixner 
/*
 * !HIGHMEM counterpart to kmap_atomic*(): optional arch cache flush
 * (page-aligned, since @addr may carry an offset), then re-enable
 * pagefaults and migration/preemption in the reverse setup order.
 */
static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
23613f876baSThomas Gleixner 
/* No highmem configured: there are no highmem pages to account for. */
static inline unsigned long nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0; }
23913f876baSThomas Gleixner 
/* Without CONFIG_HIGHMEM no kmap ranges exist, so no address qualifies. */
static inline bool is_kmap_addr(const void *x)
{
	return false;
}
2444e140f59SMatthew Wilcox (Oracle) 
24513f876baSThomas Gleixner #endif /* CONFIG_HIGHMEM */
24613f876baSThomas Gleixner 
/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr:       Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on PREEMPT_RT configuration, re-enables also
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
26813f876baSThomas Gleixner 
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page.  Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping.  See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
284f3ba3c71SThomas Gleixner 
28513f876baSThomas Gleixner #endif
286