#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}
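/*
 * Worked example (illustrative only; assumes a 64-bit unsigned long,
 * MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2, so
 * SWP_TYPE_SHIFT(e) == 64 - 7 == 57):
 *
 *	swp_entry_t e = swp_entry(3, 0x1234);
 *
 * yields e.val == (3UL << 57) | 0x1234, and the accessors round-trip:
 * swp_type(e) == 3 and swp_offset(e) == 0x1234.  The offset lands in the
 * dense low-order bits, as the packing comment above requires.
 */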
#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
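/*
 * Usage sketch (illustrative only; the flow below paraphrases what shmem
 * does, it is not a literal quote of that code).  A swapped-out page's
 * slot in the mapping's radix tree holds an exceptional entry built from
 * the swap entry, and lookup reverses the transformation:
 *
 *	swp_entry_t swap = ...;		(from swp_entry() or pte_to_swp_entry())
 *	void *radix_val = swp_to_radix_entry(swap);
 *	(store radix_val in the mapping's radix tree)
 *	...
 *	(on lookup, radix_tree_exceptional_entry(radix_val) is true)
 *	swap = radix_to_swp_entry(radix_val);
 *
 * The RADIX_TREE_EXCEPTIONAL_SHIFT shift here is why the packing comment
 * at the top of this file reserves two extra high bits for `type'.
 */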
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline int device_private_entry_fault(struct vm_area_struct *vma,
				     unsigned long addr,
				     swp_entry_t entry,
				     unsigned int flags,
				     pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
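/*
 * Dispatch sketch (illustrative only; the real fault handling lives in
 * mm/memory.c and is more involved).  A fault on a non-present pte
 * typically classifies the entry along these lines:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *	if (!non_swap_entry(entry))
 *		(genuine swap: read the page back in from swap)
 *	else if (is_migration_entry(entry))
 *		(wait for migration to finish: migration_entry_wait())
 *	else if (is_device_private_entry(entry))
 *		(let the driver handle it: device_private_entry_fault())
 *	else if (is_hwpoison_entry(entry))
 *		(the page was poisoned: fail with VM_FAULT_HWPOISON)
 *
 * non_swap_entry(), defined at the bottom of this file, works because
 * all the special types above are allocated at or beyond MAX_SWAPFILES.
 */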
#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */