/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
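
/*
 * Worked example (illustrative only, not part of the API): on a 64-bit
 * build, BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5, so
 * SWP_TYPE_SHIFT is 58 and for entry = swp_entry(2, 0x1234):
 *
 *	entry.val         == (2UL << 58) | 0x1234
 *	swp_type(entry)   == 2
 *	swp_offset(entry) == 0x1234
 *
 * i.e. the type lands in the high-order bits and the offset stays dense
 * in the low-order bits, as described above.
 */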

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
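
/*
 * Round-trip sketch (illustrative, not a complete recipe): pte_to_swp_entry()
 * drops the pte-level software bits (exclusive, soft-dirty, uffd-wp) via
 * pte_swp_clear_flags(), and swp_entry_to_pte() does not restore them, so a
 * caller that rewrites a swap pte is expected to transfer the bits it cares
 * about back explicitly, e.g.:
 *
 *	swp_entry_t entry = pte_to_swp_entry(oldpte);
 *	pte_t newpte = swp_entry_to_pte(entry);
 *
 *	if (pte_swp_soft_dirty(oldpte))
 *		newpte = pte_swp_mksoft_dirty(newpte);
 *	if (pte_swp_uffd_wp(oldpte))
 *		newpte = pte_swp_mkuffd_wp(newpte);
 *
 * (Which bits are meaningful depends on the architecture and config.)
 */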

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
			swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn stored
 * in the swap offset. They are used to represent unaddressable device memory
 * and to restrict access to a page undergoing migration.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry);
}
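
/*
 * Usage sketch (illustrative only; the local names and the retry label are
 * made up): a page-table walker that finds a non-present, non-none pte
 * typically classifies it along these lines:
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *		if (is_migration_entry(entry)) {
 *			migration_entry_wait(mm, pmd, address);
 *			goto retry;
 *		}
 *		if (is_pfn_swap_entry(entry))
 *			page = pfn_swap_entry_to_page(entry);
 *	}
 */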

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */