xref: /linux-6.15/include/linux/swapops.h (revision 2aec85b2)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

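/*
 * Worked example (illustrative, not part of the header): on a 64-bit build,
 * BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5, so:
 *
 *	SWP_TYPE_SHIFT  = 63 - 5 = 58
 *	SWP_OFFSET_MASK = (1UL << 58) - 1	(bits 0..57)
 *
 * i.e. the type occupies the high bits while up to 2^58 offsets pack densely
 * into the low-order bits, as the layout comment above requires.
 */
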
/* Clear all flags but keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
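
/*
 * Usage sketch (illustrative only): swp_entry(), swp_type() and swp_offset()
 * round-trip cleanly as long as the offset fits within SWP_OFFSET_MASK:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *
 *	WARN_ON(swp_type(entry) != type);
 *	WARN_ON(swp_offset(entry) != (offset & SWP_OFFSET_MASK));
 */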

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
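
/*
 * Usage sketch (illustrative only): a typical fault-path consumer checks
 * is_swap_pte() before decoding:
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *		...
 *	}
 *
 * Note that pte_to_swp_entry() drops the swap pte bits (exclusive, soft-dirty,
 * uffd-wp) via pte_swp_clear_flags(), so callers that care about those bits
 * must inspect the original pte.
 */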

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
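
/*
 * Usage sketch (illustrative only): shmem keeps swap entries in its xarray as
 * value entries; xa_mk_value() shifts the value up one bit and tags bit 0,
 * which is the extra bit the layout comment above reserves:
 *
 *	void *item = swp_to_radix_entry(entry);
 *	...
 *	if (xa_is_value(item))
 *		entry = radix_to_swp_entry(item);
 */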

static inline swp_entry_t make_swapin_error_entry(struct page *page)
{
	return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
}

static inline int is_swapin_error_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_SWAPIN_ERROR;
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
		swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
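
/*
 * Usage sketch (illustrative only, call sites vary): device private entries
 * carry a pfn in the swap offset, so a migration helper would do roughly:
 *
 *	swp_entry_t entry = make_writable_device_private_entry(page_to_pfn(page));
 *	pte_t pte = swp_entry_to_pte(entry);
 *
 * and the CPU fault path checks is_device_private_entry() before handing the
 * fault to the owning device driver.
 */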
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}
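
/*
 * Usage sketch (illustrative only): migration replaces a mapped pte with a
 * migration entry keyed by the page's pfn, roughly:
 *
 *	swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));
 *	set_pte_at(mm, address, ptep, swp_entry_to_pte(entry));
 *
 * Threads faulting on such a pte then block in migration_entry_wait() until
 * migration completes and a present pte is re-installed.
 */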

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

typedef unsigned long pte_marker;

#define  PTE_MARKER_UFFD_WP  BIT(0)
#define  PTE_MARKER_MASK     (PTE_MARKER_UFFD_WP)
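
/*
 * Note (illustrative): a pte marker entry stores its marker bits in the swap
 * offset, so make_pte_marker_entry(PTE_MARKER_UFFD_WP) below is simply
 * swp_entry(SWP_PTE_MARKER, 0x1), and pte_marker_get() masks the bits back
 * out with PTE_MARKER_MASK.
 */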

#ifdef CONFIG_PTE_MARKER

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

#else /* CONFIG_PTE_MARKER */

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	/* This should never be called if !CONFIG_PTE_MARKER */
	WARN_ON_ONCE(1);
	return swp_entry(0, 0);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return false;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return 0;
}

static inline bool is_pte_marker(pte_t pte)
{
	return false;
}

#endif /* CONFIG_PTE_MARKER */

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}
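
/*
 * Usage sketch (illustrative only, call sites vary): userfaultfd
 * write-protect can preserve the wp state of a zapped pte by installing a
 * marker in the otherwise-none pte:
 *
 *	set_pte_at(mm, addr, ptep, make_pte_marker(PTE_MARKER_UFFD_WP));
 *
 * and later recover it with pte_marker_get(pte_to_swp_entry(pte)).
 */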

/*
 * This is a special version of pte_none() that also covers the case where
 * the pte is a pte marker.  It exists because in many cases a pte marker
 * should be treated as a none pte: we have merely stored some information
 * in an otherwise-none pte, which makes it no longer none.
 *
 * It should be used when the pte is file-backed, RAM-based and backing
 * userspace pages, like shmem.  It is not needed on page tables that cannot
 * contain pte markers at all: for example, anonymous memory, kernel-only
 * memory (including during boot), or non-RAM-based generic filesystems.
 * It is fine to use it even there, but the extra pte marker check will be
 * pure overhead.
 *
 * For systems configured with !CONFIG_PTE_MARKER this automatically
 * optimizes down to pte_none().
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn stored
 * in the swap offset. They are used to represent unaddressable device memory
 * and to restrict access to a page undergoing migration.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry);
}
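
/*
 * Usage sketch (illustrative only): generic pte walkers can recover the
 * struct page behind any of the three pfn-bearing entry types with:
 *
 *	if (is_pfn_swap_entry(entry))
 *		page = pfn_swap_entry_to_page(entry);
 *
 * whereas a genuine swap entry's offset indexes a swap device, not a pfn.
 */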

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
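
/*
 * Usage sketch (illustrative only): a THP-aware walker that finds a
 * non-present pmd typically waits for migration to finish:
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 *
 * mirroring what migration_entry_wait() does at pte granularity.
 */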
#else
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
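
/*
 * Note (illustrative): the special types above (SWP_MIGRATION_*, SWP_DEVICE_*,
 * SWP_HWPOISON, SWP_PTE_MARKER, ...) are all allocated at or above
 * MAX_SWAPFILES, so a single comparison separates them from entries backed by
 * a real swap device:
 *
 *	if (non_swap_entry(entry))
 *		...;	(handle migration/device/hwpoison/marker cases)
 */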

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */