/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Kept outside of CONFIG_HIGHMEM to support the x86 32-bit
 * iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif
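
/*
 * A hedged sketch of how the CONFIG_KMAP_LOCAL hooks above are meant
 * to be used; the call sites shown are assumptions, not defined here:
 *
 *	// in the context switch path, away from @prev ...
 *	__kmap_local_sched_out();	// stash prev's local kmaps
 *	// ... and when switching in @next
 *	__kmap_local_sched_in();	// re-establish next's local kmaps
 *
 * kmap_local_fork() gives a new task a clean kmap_ctrl state, and
 * kmap_assert_nomap() warns (under lock debugging) if a task still
 * holds local kmaps at a point where none are expected.
 */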

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
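
/*
 * A minimal usage sketch for the sleeping kmap()/kunmap() pair; the
 * caller and the copy below are illustrative only:
 *
 *	char *vaddr;
 *
 *	vaddr = kmap(page);		// may sleep; valid until kunmap()
 *	memcpy(vaddr, buf, len);
 *	kunmap(page);			// drop the highmem mapping again
 *
 * Lowmem pages are returned straight from page_address(); only
 * highmem pages take the kmap_high() slow path.
 */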

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	preempt_enable();
}
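
/*
 * A minimal usage sketch for the atomic variants; the caller is
 * hypothetical. Between kmap_atomic() and kunmap_atomic() the task
 * must neither sleep nor take page faults, since preemption and page
 * faults are disabled above:
 *
 *	char *vaddr = kmap_atomic(page);
 *	memcpy(vaddr, buf, len);	// no sleeping allocations here
 *	kunmap_atomic(vaddr);		// pass the address, not the page
 */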

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}
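
/*
 * _totalhigh_pages counts the highmem pages known to the page
 * allocator. A hedged example of a meminfo-style reader (the call
 * site shown is an assumption):
 *
 *	val->totalhigh = totalhigh_pages();
 *	val->freehigh  = nr_free_highpages();
 */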

#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	preempt_enable();
}
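
/*
 * Without CONFIG_HIGHMEM every page is permanently mapped, so kmap()
 * collapses to page_address() and kunmap() is (almost) a no-op. The
 * atomic variants still disable preemption and page faults so that
 * callers see the same context rules on every configuration.
 */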

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

#endif /* CONFIG_HIGHMEM */

/*
 * Prevent people from trying to call kunmap_atomic() as if it were
 * kunmap(); kunmap_atomic() should get the return value of
 * kmap_atomic(), not the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
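
/*
 * Example of the misuse the type check above catches; the caller is
 * hypothetical:
 *
 *	addr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);	// build error: wants the mapped address
 *	kunmap_atomic(addr);	// correct
 */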

#endif