/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or any other
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the lru flags before releasing a folio.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
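
/*
 * Worked example (an illustrative sketch, not part of the helpers above):
 * how they resolve for a clean, swap-backed anonymous folio with PG_active
 * set and PG_unevictable clear:
 *
 *	folio_is_file_lru(folio)	-> 0 (PG_swapbacked is set)
 *	folio_lru_list(folio)		-> LRU_INACTIVE_ANON + LRU_ACTIVE
 *					   == LRU_ACTIVE_ANON
 *
 * A lazily freed anonymous folio (MADV_FREE clears PG_swapbacked) resolves
 * to the file lists instead, which is why folio_is_file_lru() is documented
 * as a list selector rather than a strict "is this file backed" test.
 */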

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also
	 * see the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline void folio_clear_lru_refs(struct folio *folio)
{
	set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
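
/*
 * Worked example (an illustrative sketch, assuming the usual
 * MAX_NR_GENS == 4): with max_seq == 7, lru_gen_from_seq(7) == 3 and
 * lru_gen_from_seq(6) == 2, so lru_gen_is_active() is true only for
 * generations 3 and 2; folios in generations 1 and 0 are accounted as
 * inactive by lru_gen_update_size(). Likewise lru_tier_from_refs() maps
 * folio_lru_refs() values 0, 1, 2, 3 to tiers 0, 1, 2, 2 via
 * order_base_2(refs + 1).
 */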

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	unsigned long mask;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are four common cases for this page:
	 * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
	 *    generation, and it's protected over the rest below.
	 * 2. If it can't be evicted immediately, i.e., a dirty page pending
	 *    writeback, add it to the second youngest generation.
	 * 3. If it should be evicted first, e.g., cold and clean from
	 *    folio_rotate_reclaimable(), add it to the oldest generation.
	 * 4. Everything else falls between 2 & 3 above and is added to the
	 *    second oldest generation if it's considered inactive, or the
	 *    oldest generation otherwise. See lru_gen_is_active().
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->max_seq - 1;
	else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
		seq = lrugen->min_seq[type];
	else
		seq = lrugen->min_seq[type] + 1;

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	mask = LRU_GEN_MASK;
	/*
	 * Don't clear PG_workingset here because it can affect PSI accounting
	 * if the activation is due to workingset refault.
	 */
	if (folio_test_active(folio))
		mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active);
	set_mask_bits(&folio->flags, mask, flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}
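
/*
 * Note on the encoding used above (an illustrative summary, not new
 * semantics): the generation is stored in folio->flags offset by one, so a
 * zero LRU_GEN field means "not on an MGLRU list":
 *
 *	lru_gen_add_folio():	sets the field to gen + 1
 *	folio_lru_gen():	returns the field minus 1, i.e. -1 when clear
 *	lru_gen_del_folio():	clears the field and, unless reclaiming,
 *				transfers an active generation into PG_active
 *				for folio_migrate_flags()
 *
 * This is what lets lruvec_add_folio()/lruvec_del_folio() below try the
 * MGLRU path first and fall back to the classic active/inactive lists when
 * these helpers return false.
 */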

static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{
	unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;

	set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{

}
#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}
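
/*
 * Usage sketch (illustrative only; the call site shown is hypothetical):
 * the expected refcounting pattern when a named VMA is duplicated, with
 * mmap_lock held for read:
 *
 *	dup_anon_vma_name(orig_vma, new_vma);
 *		-> anon_vma_name_reuse() normally just takes a reference;
 *		   only when the kref is close to saturating does it
 *		   allocate a fresh copy via anon_vma_name_alloc()
 *	...
 *	free_anon_vma_name(new_vma);	-> drops that reference on teardown
 */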

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
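
/*
 * Caller-side sketch of the pairing described in inc_tlb_flush_pending()
 * above (illustrative only; the surrounding code is elided):
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	... modify PTEs ...
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * Any other thread that takes the same PTL and observes the modified PTEs
 * is then guaranteed to see mm_tlb_flush_pending() return true; the
 * decrement is only made once flush_tlb_range() has completed.
 */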

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
#endif

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so no TLB flush is
 * needed either: when the pte was cleared, the caller should have taken care
 * of the TLB flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte. There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}

#endif
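
/*
 * Usage sketch for pte_install_uffd_wp_if_needed() (illustrative only; the
 * call site below is hypothetical): in a zap-style path the pte is cleared
 * first, then the helper decides whether a PTE_MARKER_UFFD_WP marker must
 * be left behind, all under the pgtable lock:
 *
 *	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);
 *	... free or unmap whatever pteval referred to ...
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 */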