/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}
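
/*
 * For reference: folio_lru_list() below composes its result from the
 * enum lru_list values declared in <linux/mmzone.h>. Assuming the usual
 * definitions (LRU_BASE == 0, LRU_ACTIVE == 1, LRU_FILE == 2), the
 * mapping works out to:
 *
 *	anon, inactive   -> LRU_INACTIVE_ANON (0)
 *	anon, active     -> LRU_ACTIVE_ANON   (1)
 *	file, inactive   -> LRU_INACTIVE_FILE (2)
 *	file, active     -> LRU_ACTIVE_FILE   (3)
 *	unevictable      -> LRU_UNEVICTABLE   (4)
 *
 * i.e. the base list is picked by folio_is_file_lru() and LRU_ACTIVE is
 * added when PG_active is set.
 */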

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}

#ifdef CONFIG_LRU_GEN

static inline bool lru_gen_enabled(void)
{
	return true;
}

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}
}
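
/*
 * A short worked example of the generation bookkeeping used by
 * lru_gen_add_folio()/lru_gen_del_folio() below (illustrative only;
 * MAX_NR_GENS == 4 is assumed here):
 *
 *	gen = lru_gen_from_seq(seq) = seq % MAX_NR_GENS, so seq 5 -> gen 1.
 *	A folio stores gen + 1 in the LRU_GEN_MASK bits of folio->flags,
 *	so an all-zero field means "not on a multi-gen LRU list" and
 *	folio_lru_gen() returns -1 for such folios.
 *	Only the two youngest generations (max_seq and max_seq - 1) count
 *	as "active" for the classic LRU size accounting above.
 */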

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio))
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->lists[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */
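
/*
 * The helpers below are the common entry points for putting folios on and
 * taking them off the LRU lists (classic or multi-gen). A rough usage
 * sketch, shown only for illustration; the real call sites live in places
 * such as mm/swap.c and mm/vmscan.c:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_add_folio(lruvec, folio);	// link onto folio_lru_list()
 *	spin_unlock_irq(&lruvec->lru_lock);
 *
 * lru_gen_add_folio()/lru_gen_del_folio() short-circuit these helpers when
 * the multi-gen LRU is compiled in, so the classic list manipulation only
 * runs in the !CONFIG_LRU_GEN case or for unevictable folios.
 */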

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name() because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	if (!vma->vm_file)
		anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */
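
/*
 * Rough lifetime sketch for the anon_vma_name helpers above (illustrative
 * only; the calls below stand in for the real users, e.g. the VMA
 * duplication and teardown paths):
 *
 *	// copying a VMA: share the existing name, taking a reference
 *	dup_anon_vma_name(old_vma, new_vma);
 *	...
 *	// tearing the copy down: drop the reference (anon VMAs only)
 *	free_anon_vma_name(new_vma);
 *
 * anon_vma_name_reuse() allocates a fresh copy instead of taking another
 * reference once the kref has reached REFCOUNT_MAX, preventing refcount
 * saturation.
 */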

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so no TLB flush is
 * needed; e.g., when the pte was cleared, the caller should have taken care
 * of the TLB flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(*pte));

	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

#endif