/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or any other
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the lru flags before releasing a folio.
 * @folio: The folio that was on an lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
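
/*
 * Illustrative note (not part of the original header): the arithmetic in
 * folio_lru_list() relies on the enum lru_list layout, where the active
 * list index is the corresponding inactive one plus LRU_ACTIVE:
 *
 *	LRU_INACTIVE_ANON + LRU_ACTIVE == LRU_ACTIVE_ANON
 *	LRU_INACTIVE_FILE + LRU_ACTIVE == LRU_ACTIVE_FILE
 *
 * So an evictable, file-backed folio with the active flag set maps to
 * LRU_ACTIVE_FILE, and the same folio with the flag clear maps to
 * LRU_INACTIVE_FILE.
 */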

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer, or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	if (!vma->vm_file)
		anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */
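
/*
 * Illustrative lifetime sketch (an assumption about typical usage, not an
 * exported helper): a reader that wants to use the name after dropping
 * mmap_lock must pin it first, then drop its reference when done:
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	... anon_name->name stays valid here if anon_name is not NULL ...
 *	anon_vma_name_put(anon_name);
 */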

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Because the increment is constrained by the PTL unlock, it is
	 * guaranteed to be visible whenever the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is
	 * irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
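
/*
 * Illustrative caller pattern (a sketch, not a contract): a range operation
 * that modifies PTEs brackets the work with the pending counter, matching
 * the ordering diagram in inc_tlb_flush_pending() above, so that concurrent
 * PTL holders can detect the not-yet-performed flush:
 *
 *	inc_tlb_flush_pending(mm);
 *	... take the PTL, modify the PTEs, drop the PTL ...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 */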

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so no TLB flush is
 * needed here; when the pte was cleared, the caller should have taken care
 * of the TLB flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(*pte));

	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

#endif /* LINUX_MM_INLINE_H */