/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for cleaning up the
	 * anon_vma if it is the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas. Equal to the count of all anon_vmas that
	 * have ->parent pointing to this one, including itself.
	 *
	 * This counter is used when deciding whether to reuse an anon_vma
	 * instead of forking a new one. See the comments in anon_vma_clone().
	 */
	unsigned long num_children;
	/* Count of VMAs whose ->anon_vma pointer points to this object. */
	unsigned long num_active_vmas;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */

	/* Interval tree of private "related" vmas */
	struct rb_root_cached rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_lock & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
	TTU_HWPOISON		= 0x20,	/* convert the PTE to a hwpoison entry */
	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED		= 0x80,	/* do not grab rmap lock:
					 * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
{
	return down_write_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
{
	return down_read_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
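
/*
 * Illustrative sketch (not a pattern mandated by this header): rmap
 * walkers typically pin the anon_vma with a reference so it cannot be
 * freed, then take the root rwsem for reading while scanning:
 *
 *	struct anon_vma *anon_vma = folio_get_anon_vma(folio);
 *
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... scan the anon_vma->rb_root interval tree ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}
 *
 * folio_lock_anon_vma_read(), declared later in this header, implements
 * a more careful variant that avoids taking the reference in the
 * common case.
 */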

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

static inline void anon_vma_merge(struct vm_area_struct *vma,
		struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *folio_get_anon_vma(struct folio *folio);
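
/*
 * Usage sketch (assumed caller pattern, mirroring the anonymous fault
 * path): before mapping a new anonymous page into a VMA, the VMA needs
 * an anon_vma; anon_vma_prepare() is the inline fast path for that:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	... allocate a folio and map it ...
 */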

/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;

/*
 * No special request: A mapped anonymous (sub)page is possibly shared between
 * processes.
 */
#define RMAP_NONE		((__force rmap_t)0)

/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))

/*
 * Internally, we're using an enum to specify the granularity. We make the
 * compiler emit specialized code for each granularity.
 */
enum rmap_level {
	RMAP_LEVEL_PTE = 0,
	RMAP_LEVEL_PMD,
};

static inline void __folio_rmap_sanity_checks(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level)
{
	/* hugetlb folios are handled separately. */
	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);

	/*
	 * TODO: we get driver-allocated folios that have nothing to do with
	 * the rmap using vm_insert_page(); therefore, we cannot assume that
	 * folio_test_large_rmappable() holds for large folios. We should
	 * handle any desired mapcount+stats accounting for these folios in
	 * VM_MIXEDMAP VMAs separately, and then sanity-check here that
	 * we really only get rmappable folios.
	 */

	VM_WARN_ON_ONCE(nr_pages <= 0);
	VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
	VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);

	switch (level) {
	case RMAP_LEVEL_PTE:
		break;
	case RMAP_LEVEL_PMD:
		/*
		 * We don't support folios larger than a single PMD yet. So
		 * when RMAP_LEVEL_PMD is set, we assume that we are creating
		 * a single "entire" mapping of the folio.
		 */
		VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
		VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
		break;
	default:
		VM_WARN_ON_ONCE(true);
	}
}

/*
 * rmap interfaces called when adding or removing pte of page
 */
void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *, unsigned long address, rmap_t flags);
#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
	folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
void folio_add_anon_rmap_pmd(struct folio *, struct page *,
		struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address);
void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *);
#define folio_add_file_rmap_pte(folio, page, vma) \
	folio_add_file_rmap_ptes(folio, page, 1, vma)
void folio_add_file_rmap_pmd(struct folio *, struct page *,
		struct vm_area_struct *);
void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *);
#define folio_remove_rmap_pte(folio, page, vma) \
	folio_remove_rmap_ptes(folio, page, 1, vma)
void folio_remove_rmap_pmd(struct folio *, struct page *,
		struct vm_area_struct *);
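
/*
 * Usage sketch (assumptions: caller holds the page table lock and the
 * folio is freshly allocated, roughly as in the anonymous fault path):
 *
 *	__folio_mark_uptodate(folio);
 *	folio_add_new_anon_rmap(folio, vma, addr);
 *	folio_add_lru_vma(folio, vma);
 *	set_pte_at(vma->vm_mm, addr, pte, mk_pte(&folio->page, vma->vm_page_prot));
 *
 * folio_add_new_anon_rmap() must only be used on a *new* folio that is
 * not yet mapped anywhere; folios that are already mapped go through
 * folio_add_anon_rmap_ptes()/folio_add_anon_rmap_pmd() instead.
 */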

void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address, rmap_t flags);
void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address);

/* See folio_try_dup_anon_rmap_*() */
static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	if (PageAnonExclusive(&folio->page)) {
		if (unlikely(folio_needs_cow_for_dma(vma, folio)))
			return -EBUSY;
		ClearPageAnonExclusive(&folio->page);
	}
	atomic_inc(&folio->_entire_mapcount);
	return 0;
}

/* See folio_try_share_anon_rmap_*() */
static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
	VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);

	/* Paired with the memory barrier in try_grab_folio(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb();

	if (unlikely(folio_maybe_dma_pinned(folio)))
		return -EBUSY;
	ClearPageAnonExclusive(&folio->page);

	/*
	 * This is conceptually a smp_wmb() paired with the smp_rmb() in
	 * gup_must_unshare().
	 */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb__after_atomic();
	return 0;
}

static inline void hugetlb_add_file_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
}

static inline void hugetlb_remove_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	atomic_dec(&folio->_entire_mapcount);
}

static __always_inline void __folio_dup_file_rmap(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level)
{
	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case RMAP_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			atomic_inc(&page->_mapcount);
			break;
		}

		do {
			atomic_inc(&page->_mapcount);
		} while (page++, --nr_pages > 0);
		break;
	case RMAP_LEVEL_PMD:
		atomic_inc(&folio->_entire_mapcount);
		break;
	}
}

/**
 * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
 * @folio:	The folio to duplicate the mappings of
 * @page:	The first page to duplicate the mappings of
 * @nr_pages:	The number of pages of which the mapping will be duplicated
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
static inline void folio_dup_file_rmap_ptes(struct folio *folio,
		struct page *page, int nr_pages)
{
	__folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
}

static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
		struct page *page)
{
	__folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE);
}

/**
 * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
 * @folio:	The folio to duplicate the mapping of
 * @page:	The first page to duplicate the mapping of
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
static inline void folio_dup_file_rmap_pmd(struct folio *folio,
		struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}
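
/*
 * Usage sketch (assumed fork()-style caller): when copying a present
 * PTE of a file-backed folio from the parent to the child mm, the
 * mapping is duplicated unconditionally; unlike the anon variants
 * below, duplication of file rmaps cannot fail:
 *
 *	folio_dup_file_rmap_pte(folio, page);
 *	set_pte_at(dst_mm, addr, dst_pte, pte);
 */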

static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *src_vma,
		enum rmap_level level)
{
	bool maybe_pinned;
	int i;

	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	/*
	 * If this folio may have been pinned by the parent process,
	 * don't allow duplicating the mappings; instead require that,
	 * e.g., the subpage be copied immediately for the child, so that
	 * we always guarantee the pinned folio won't be randomly replaced
	 * in the future on write faults.
	 */
	maybe_pinned = likely(!folio_is_device_private(folio)) &&
		       unlikely(folio_needs_cow_for_dma(src_vma, folio));

	/*
	 * No need to check+clear for already shared PTEs/PMDs of the
	 * folio. But if any page is PageAnonExclusive, we must fall back
	 * to copying if the folio may be pinned.
	 */
	switch (level) {
	case RMAP_LEVEL_PTE:
		if (unlikely(maybe_pinned)) {
			for (i = 0; i < nr_pages; i++)
				if (PageAnonExclusive(page + i))
					return -EBUSY;
		}

		if (!folio_test_large(folio)) {
			if (PageAnonExclusive(page))
				ClearPageAnonExclusive(page);
			atomic_inc(&page->_mapcount);
			break;
		}

		do {
			if (PageAnonExclusive(page))
				ClearPageAnonExclusive(page);
			atomic_inc(&page->_mapcount);
		} while (page++, --nr_pages > 0);
		break;
	case RMAP_LEVEL_PMD:
		if (PageAnonExclusive(page)) {
			if (unlikely(maybe_pinned))
				return -EBUSY;
			ClearPageAnonExclusive(page);
		}
		atomic_inc(&folio->_entire_mapcount);
		break;
	}
	return 0;
}

/**
 * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
 *				  of a folio
 * @folio:	The folio to duplicate the mappings of
 * @page:	The first page to duplicate the mappings of
 * @nr_pages:	The number of pages of which the mapping will be duplicated
 * @src_vma:	The vm area from which the mappings are duplicated
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock and the
 * src_vma->vm_mm->write_protect_seq.
 *
 * Duplicating the mappings can only fail if the folio may be pinned; device
 * private folios cannot get pinned and consequently this function cannot fail
 * for them.
 *
 * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
 * the parent and the child. They must *not* be writable after this call
 * succeeded.
 *
 * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
 */
static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *src_vma)
{
	return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
					 RMAP_LEVEL_PTE);
}

static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
		struct page *page, struct vm_area_struct *src_vma)
{
	return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,
					 RMAP_LEVEL_PTE);
}
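
/*
 * Usage sketch (modeled on the fork() COW copy path in mm/memory.c;
 * copy_for_child() stands in for the real copy fallback and is
 * hypothetical):
 *
 *	if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma)))
 *		return copy_for_child(...);	-EBUSY: folio may be pinned
 *
 *	pte = pte_wrprotect(pte);	parent and child must both be R/O
 */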

/**
 * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
 *				 of a folio
 * @folio:	The folio to duplicate the mapping of
 * @page:	The first page to duplicate the mapping of
 * @src_vma:	The vm area from which the mapping is duplicated
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock and the
 * src_vma->vm_mm->write_protect_seq.
 *
 * Duplicating the mapping can only fail if the folio may be pinned; device
 * private folios cannot get pinned and consequently this function cannot fail
 * for them.
 *
 * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
 * the parent and the child. They must *not* be writable after this call
 * succeeded.
 *
 * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
 */
static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
		struct page *page, struct vm_area_struct *src_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
					 RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
	return -EBUSY;
#endif
}

static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level)
{
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
	VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	/* device private folios cannot get pinned via GUP. */
	if (unlikely(folio_is_device_private(folio))) {
		ClearPageAnonExclusive(page);
		return 0;
	}

	/*
	 * We have to make sure that when we clear PageAnonExclusive, that
	 * the page is not pinned and that concurrent GUP-fast won't succeed in
	 * concurrently pinning the page.
	 *
	 * Conceptually, PageAnonExclusive clearing consists of:
	 * (A1) Clear PTE
	 * (A2) Check if the page is pinned; back off if so.
	 * (A3) Clear PageAnonExclusive
	 * (A4) Restore PTE (optional, but certainly not writable)
	 *
	 * When clearing PageAnonExclusive, we cannot possibly map the page
	 * writable again, because anon pages that may be shared must never
	 * be writable. So in any case, if the PTE was writable it cannot
	 * be writable anymore afterwards and there would be a PTE change. Only
	 * if the PTE wasn't writable, there might not be a PTE change.
	 *
	 * Conceptually, GUP-fast pinning of an anon page consists of:
	 * (B1) Read the PTE
	 * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
	 * (B3) Pin the mapped page
	 * (B4) Check if the PTE changed by re-reading it; back off if so.
	 * (B5) If the original PTE is not writable, check if
	 *	PageAnonExclusive is not set; back off if so.
	 *
	 * If the PTE was writable, we only have to make sure that GUP-fast
	 * observes a PTE change and properly backs off.
	 *
	 * If the PTE was not writable, we have to make sure that GUP-fast either
	 * detects a (temporary) PTE change or that PageAnonExclusive is cleared
	 * and properly backs off.
	 *
	 * Consequently, when clearing PageAnonExclusive(), we have to make
	 * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
	 * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
	 * and (B5) happen in the right memory order.
	 *
	 * We assume that there might not be a memory barrier after
	 * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
	 * so we use explicit ones here.
	 */

	/* Paired with the memory barrier in try_grab_folio(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb();

	if (unlikely(folio_maybe_dma_pinned(folio)))
		return -EBUSY;
	ClearPageAnonExclusive(page);

	/*
	 * This is conceptually a smp_wmb() paired with the smp_rmb() in
	 * gup_must_unshare().
	 */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb__after_atomic();
	return 0;
}

/**
 * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
 *				   mapped by a PTE possibly shared to prepare
 *				   for KSM or temporary unmapping
 * @folio:	The folio to share a mapping of
 * @page:	The mapped exclusive page
 *
 * The caller needs to hold the page table lock and has to have the page table
 * entries cleared/invalidated.
 *
 * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
 * fork() to duplicate mappings, but instead to prepare for KSM or temporarily
 * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
 *
 * Marking the mapped page shared can only fail if the folio may be pinned;
 * device private folios cannot get pinned and consequently this function
 * cannot fail.
 *
 * Returns 0 if marking the mapped page possibly shared succeeded. Returns
 * -EBUSY otherwise.
 */
static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
		struct page *page)
{
	return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
}
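
/*
 * Usage sketch of the (A1)..(A4) protocol described above (simplified
 * from a try_to_unmap()-style caller that holds the page table lock):
 *
 *	pteval = ptep_get_and_clear(mm, address, pvmw.pte);	(A1)
 *	if (folio_try_share_anon_rmap_pte(folio, subpage)) {	(A2)/(A3)
 *		set_pte_at(mm, address, pvmw.pte, pteval);	(A4) restore
 *		goto abort;
 *	}
 *	... install the swap/migration entry instead ...
 */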

/**
 * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
 *				   range mapped by a PMD possibly shared to
 *				   prepare for temporary unmapping
 * @folio:	The folio to share the mapping of
 * @page:	The first page to share the mapping of
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock and has to have the page table
 * entries cleared/invalidated.
 *
 * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
 * fork() to duplicate a mapping, but instead to prepare for temporarily
 * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
 *
 * Marking the mapped pages shared can only fail if the folio may be pinned;
 * device private folios cannot get pinned and consequently this function
 * cannot fail.
 *
 * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
 * -EBUSY otherwise.
 */
static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
		struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
					   RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
	return -EBUSY;
#endif
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int folio_referenced(struct folio *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);

int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, struct page **pages,
				void *arg);

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION		(1 << 1)

struct page_vma_mapped_walk {
	unsigned long pfn;
	unsigned long nr_pages;
	pgoff_t pgoff;
	struct vm_area_struct *vma;
	unsigned long address;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	unsigned int flags;
};

#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags)	\
	struct page_vma_mapped_walk name = {				\
		.pfn = page_to_pfn(_page),				\
		.nr_pages = compound_nr(_page),				\
		.pgoff = page_to_pgoff(_page),				\
		.vma = _vma,						\
		.address = _address,					\
		.flags = _flags,					\
	}

#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)	\
	struct page_vma_mapped_walk name = {				\
		.pfn = folio_pfn(_folio),				\
		.nr_pages = folio_nr_pages(_folio),			\
		.pgoff = folio_pgoff(_folio),				\
		.vma = _vma,						\
		.address = _address,					\
		.flags = _flags,					\
	}

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	/* HugeTLB ptes are never mapped with pte_offset_map(), so no pte_unmap(). */
	if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
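
/*
 * Usage sketch (assumptions: caller holds a folio reference, and
 * vma/address come from an rmap walk): visit every page table entry in
 * @vma that maps part of @folio:
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			... one PTE of the folio, at pvmw.address ...
 *		else
 *			... pvmw.pmd maps the folio with a huge PMD ...
 *	}
 *
 * Each iteration returns with pvmw.ptl held; breaking out of the loop
 * early requires page_vma_mapped_walk_done(&pvmw) to drop the lock and
 * the pte mapping.
 */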

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int folio_mkclean(struct folio *);

int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma);

void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);

int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * try_lock: bail out if the rmap lock is contended
 * contended: indicate the rmap traversal bailed out due to lock contention
 * rmap_one: executed on each vma where page is mapped
 * done: for checking traversing termination condition
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *	      than the default
 * invalid_vma: for skipping VMAs we are not interested in
 */
struct rmap_walk_control {
	void *arg;
	bool try_lock;
	bool contended;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
	 */
	bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct folio *folio);
	struct anon_vma *(*anon_lock)(struct folio *folio,
				      struct rmap_walk_control *rwc);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc);
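
/*
 * Usage sketch (modeled on folio_referenced()-style callers; the
 * my_rmap_one() callback and my_arg are hypothetical):
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = &my_arg,
 *		.try_lock = true,
 *	};
 *
 *	rmap_walk(folio, &rwc);
 *	if (rwc.contended)
 *		... the traversal bailed out on lock contention ...
 */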

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)

static inline int folio_referenced(struct folio *folio, int is_locked,
				   struct mem_cgroup *memcg,
				   unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
}

static inline int folio_mkclean(struct folio *folio)
{
	return 0;
}
#endif	/* CONFIG_MMU */

static inline int page_mkclean(struct page *page)
{
	return folio_mkclean(page_folio(page));
}
#endif	/* _LINUX_RMAP_H */