/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	list_del(&folio->lru);
	update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}

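/*
 * Illustrative sketch, not part of the kernel API: the function name below is
 * made up for this example.  It shows how the helpers above combine when a
 * folio moves from an inactive list to the matching active one.  The caller
 * is assumed to hold the lruvec lru_lock and to have the folio isolated, as
 * the real users in mm/swap.c and mm/vmscan.c do.
 */
static inline void example_lruvec_activate_folio(struct lruvec *lruvec,
						 struct folio *folio)
{
	if (folio_test_active(folio) || folio_test_unevictable(folio))
		return;

	/* Take it off LRU_INACTIVE_ANON/FILE, subtracting its page count. */
	lruvec_del_folio(lruvec, folio);
	/* Setting the flag changes what folio_lru_list() will return. */
	folio_set_active(folio);
	/* Re-add on LRU_ACTIVE_ANON/FILE, adding its page count back. */
	lruvec_add_folio(lruvec, folio);
}
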
#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling vma_anon_name() and while using
 * the returned pointer.
 */
extern const char *vma_anon_name(struct vm_area_struct *vma);

/*
 * mmap_lock should be read-locked for orig_vma->vm_mm.
 * mmap_lock should be write-locked for new_vma->vm_mm or new_vma should be
 * isolated.
 */
extern void dup_vma_anon_name(struct vm_area_struct *orig_vma,
			      struct vm_area_struct *new_vma);

/*
 * mmap_lock should be write-locked or vma should have been isolated under
 * write-locked mmap_lock protection.
 */
extern void free_vma_anon_name(struct vm_area_struct *vma);

/* mmap_lock should be read-locked */
static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
					 const char *name)
{
	const char *vma_name = vma_anon_name(vma);

	/* either both NULL, or pointers to same string */
	if (vma_name == name)
		return true;

	return name && vma_name && !strcmp(name, vma_name);
}
#else /* CONFIG_ANON_VMA_NAME */
static inline const char *vma_anon_name(struct vm_area_struct *vma)
{
	return NULL;
}
static inline void dup_vma_anon_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_vma_anon_name(struct vm_area_struct *vma) {}
static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
					 const char *name)
{
	return true;
}
#endif /* CONFIG_ANON_VMA_NAME */

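/*
 * Illustrative sketch, not part of the kernel API: the function name below is
 * made up for this example.  The typical question callers ask is whether two
 * VMAs carry the same anon name, for instance when deciding whether adjacent
 * VMAs may be merged.  Per the locking comments above, the caller must hold
 * mmap_lock for read on the mm that owns both VMAs.
 */
static inline bool example_vma_anon_names_match(struct vm_area_struct *a,
						struct vm_area_struct *b)
{
	/* vma_anon_name() may return NULL; is_same_vma_anon_name() copes. */
	return is_same_vma_anon_name(a, vma_anon_name(b));
}
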
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

#endif
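
/*
 * Illustrative usage sketch, not part of this header; the variables below
 * (mm, vma, start, end) are hypothetical.  A path that modifies PTEs and
 * defers the TLB flush brackets the work with inc/dec_tlb_flush_pending(),
 * while a concurrent path holding the same PTL can use
 * mm_tlb_flush_pending() to learn that a flush for the PTEs it just
 * observed is still outstanding:
 *
 *	inc_tlb_flush_pending(mm);
 *	... take the ptl, clear or modify PTEs, drop the ptl ...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 *	// elsewhere, under the same ptl:
 *	if (mm_tlb_flush_pending(mm))
 *		flush_tlb_range(vma, start, end);
 *
 * mm_tlb_flush_nested() additionally reports whether more than one such
 * batch is in flight, letting a caller fall back to flushing the full
 * range it covered rather than only the PTEs it touched.
 */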