/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
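
/*
 * A worked example of the mapping above (the values follow from enum
 * lru_list): an active, swap-backed folio lands on LRU_ACTIVE_ANON, i.e.
 * LRU_INACTIVE_ANON + LRU_ACTIVE, while a clean page cache folio that was
 * never activated lands on LRU_INACTIVE_FILE.
 */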

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

#ifdef CONFIG_MEMCG
static inline int lru_gen_memcg_seg(struct lruvec *lruvec)
{
	return READ_ONCE(lruvec->lrugen.seg);
}
#else
static inline int lru_gen_memcg_seg(struct lruvec *lruvec)
{
	return 0;
}
#endif

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also
	 * see the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
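
/*
 * Worked example, assuming MAX_NR_GENS is 4: with max_seq == 7, the two
 * generations lru_gen_from_seq(7) == 3 and lru_gen_from_seq(6) == 2 are the
 * ones lru_gen_is_active() reports as active; the other two count as
 * inactive.
 */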

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}
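
/*
 * Note on the encoding used above: lru_gen_add_folio() stores the generation
 * number in folio->flags with a +1 bias, so an all-zero LRU_GEN field means
 * "not on any multi-gen LRU list" and folio_lru_gen() returns -1, which is
 * exactly what lru_gen_del_folio() below bails out on.
 */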

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline int lru_gen_memcg_seg(struct lruvec *lruvec)
{
	return 0;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}
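
/*
 * Minimal usage sketch (illustration only, not a helper defined here; it
 * assumes the existing lruvec locking primitives from memcontrol.h): moving
 * a folio that is already on an LRU to its active list boils down to
 * deleting it, updating the flags folio_lru_list() keys off, and adding it
 * back, all under the lruvec lock that __update_lru_size() asserts:
 *
 *	struct lruvec *lruvec = folio_lruvec_lock_irq(folio);
 *
 *	lruvec_del_folio(lruvec, folio);
 *	folio_set_active(folio);
 *	lruvec_add_folio(lruvec, folio);
 *	unlock_page_lruvec_irq(lruvec);
 */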

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
 * either keep holding the lock while using the returned pointer or it should
 * raise anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Because the increment is constrained by the PTL unlock, it ensures
	 * that the increment is visible if the PTE modification is visible.
	 * After all, if there is no PTE modification, nobody cares about TLB
	 * flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile,
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed; e.g., when the pte was cleared, the caller should have taken care of
 * the tlb flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(*pte));

	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

#endif