#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

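/*
 * Should transparent hugepages be used for @vma?  Per-VMA and per-mm
 * opt-outs (VM_NOHUGEPAGE, temporary stacks, MMF_DISABLE_THP) are checked
 * first, then the global "always" policy bit, then DAX mappings, and
 * finally the "madvise" policy bit combined with VM_HUGEPAGE on the VMA.
 */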
static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

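/*
 * pmd_trans_huge_lock()/pud_trans_huge_lock() take the page table lock
 * covering a huge entry and return it, or return NULL when the entry is
 * not huge.  A sketch of the usual caller pattern (illustrative only,
 * not taken verbatim from any particular caller):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd with ptl held ...
 *		spin_unlock(ptl);
 *	}
 */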
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */