1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_HIGHMEM_INTERNAL_H
3 #define _LINUX_HIGHMEM_INTERNAL_H
4 
5 /*
6  * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
7  */
8 #ifdef CONFIG_KMAP_LOCAL
9 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
10 void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
11 void kunmap_local_indexed(void *vaddr);
12 void kmap_local_fork(struct task_struct *tsk);
13 void __kmap_local_sched_out(void);
14 void __kmap_local_sched_in(void);
/* Debug check: the current task must hold no local kmaps at this point. */
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
19 #else
/* !CONFIG_KMAP_LOCAL: no per-task kmap state to copy on fork or to check. */
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
22 #endif
23 
24 #ifdef CONFIG_HIGHMEM
25 #include <asm/highmem.h>
26 
27 #ifndef ARCH_HAS_KMAP_FLUSH_TLB
/* Default no-op; architectures that need a flush define ARCH_HAS_KMAP_FLUSH_TLB. */
static inline void kmap_flush_tlb(unsigned long addr) { }
29 #endif
30 
31 #ifndef kmap_prot
32 #define kmap_prot PAGE_KERNEL
33 #endif
34 
35 void *kmap_high(struct page *page);
36 void kunmap_high(struct page *page);
37 void __kmap_flush_unused(void);
38 struct page *__kmap_to_page(void *addr);
39 
/*
 * Map a page into the kernel's address space. Lowmem pages are already
 * mapped and resolved via page_address(); highmem pages take a slot from
 * the kmap pool via kmap_high(), which may sleep — hence might_sleep().
 */
static inline void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();
	vaddr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	kmap_flush_tlb((unsigned long)vaddr);
	return vaddr;
}
52 
/*
 * Undo kmap(). Only highmem pages have a pool mapping to release;
 * lowmem pages came straight from the linear map and need no work.
 */
static inline void kunmap(struct page *page)
{
	might_sleep();
	if (PageHighMem(page))
		kunmap_high(page);
}
60 
/* Translate a kmap'ed (or lowmem) virtual address back to its struct page. */
static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}
65 
/* Flush unused entries out of the kmap pool (wrapper around the core helper). */
static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}
70 
/* Map a page for local use with the default kernel protection (kmap_prot). */
static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}
75 
/* As kmap_local_page(), but with caller-supplied page protection bits. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}
80 
/* Map a page by PFN with the default kernel protection. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
85 
/* Backend for the kunmap_local() macro defined at the end of this header. */
static inline void __kunmap_local(void *vaddr)
{
	kunmap_local_indexed(vaddr);
}
90 
/*
 * Atomic variant: disable preemption and pagefaults before mapping.
 * Must be paired with kunmap_atomic(), which undoes both in reverse order.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}
97 
/* kmap_atomic_prot() with the default kernel protection. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
102 
/* PFN-based atomic map; same preempt/pagefault pairing rules as kmap_atomic(). */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
109 
/*
 * Tear down an atomic mapping: unmap first, then reenable pagefaults and
 * preemption in the reverse order of kmap_atomic_prot().
 */
static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	preempt_enable();
}
116 
117 unsigned int __nr_free_highpages(void);
118 extern atomic_long_t _totalhigh_pages;
119 
/* Number of free highmem pages, as reported by the highmem core. */
static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}
124 
/* Total number of highmem pages currently accounted in _totalhigh_pages. */
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}
129 
/* Adjust the highmem page accounting by @count (may be negative). */
static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}
134 
135 #else /* CONFIG_HIGHMEM */
136 
/* No highmem: every kernel virtual address maps linearly to its page. */
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}
141 
/* No highmem: mapping is the identity; keep might_sleep() for API parity. */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
147 
/* No highmem: nothing to unmap and no kmap pool to flush. */
static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }
150 
/* No highmem: only an arch-specific cache flush may be required on unmap. */
static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}
157 
/* No highmem: a local mapping is just the page's linear-map address. */
static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}
162 
/* Protection is ignored without highmem; the linear map is used as-is. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}
167 
/* PFN-based variant of kmap_local_page() for the !HIGHMEM case. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}
172 
/* Only an arch-specific cache flush is needed when unmapping. */
static inline void __kunmap_local(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}
179 
/*
 * Even without highmem, kmap_atomic() disables preemption and pagefaults
 * so callers get the same execution-context guarantees on all configs.
 */
static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
186 
/* Protection is meaningless without highmem; defer to kmap_atomic(). */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}
191 
/* PFN-based atomic map for the !HIGHMEM case. */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
196 
/*
 * Reenable pagefaults and preemption in reverse order of kmap_atomic(),
 * after any arch-required cache flush on the unmapped address.
 */
static inline void __kunmap_atomic(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	preempt_enable();
}
205 
/* No highmem pages exist in this configuration. */
static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }
208 
209 #endif /* CONFIG_HIGHMEM */
210 
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() takes the return value of kmap_atomic(), not the page.
 * Passing a struct page * triggers a compile-time error.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
220 
/* Same guard for kunmap_local(): it expects the vaddr from kmap_local_*(). */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
226 
227 #endif
228