/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
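/*
 * Worked example (illustrative, assuming x86-64 with 4 KiB base pages):
 * PMD_SHIFT is 21 and PAGE_SHIFT is 12, so HPAGE_PMD_ORDER is 9 and a
 * PMD-sized huge page spans HPAGE_PMD_NR == 512 base pages.
 */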

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
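/*
 * Illustrative sizes (x86-64, 4 KiB pages): HPAGE_PMD_SIZE is 2 MiB
 * (1UL << 21) and HPAGE_PUD_SIZE is 1 GiB (1UL << 30).
 */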

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
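/*
 * These map onto /sys/kernel/mm/transparent_hugepage/enabled: "always"
 * sets TRANSPARENT_HUGEPAGE_FLAG and "madvise" sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, so hugepage_flags_enabled() is
 * true in either mode while hugepage_flags_always() is true only for
 * "always".
 */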

/*
 * Perform the following checks:
 *   - For file vmas, check that the linear page offset of the vma is
 *     HPAGE_PMD_NR-aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     also check that PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, or the hugepage will not
 *     be PMD-mappable.
 *   - For all vmas, check that the HPAGE_PMD_SIZE-aligned range
 *     containing addr lies entirely within the vma.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
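/*
 * Worked example (illustrative, x86-64 with 4 KiB pages, so
 * HPAGE_PMD_NR == 512): a file vma with vm_start == 0x400000 and
 * vm_pgoff == 0x200 passes the alignment check, since
 * (0x400000 >> 12) - 0x200 == 0x200, a multiple of 512; with
 * vm_pgoff == 0x201 the difference is 0x1ff and the vma is rejected.
 */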

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
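/*
 * Splitting requires @page to be locked; it returns 0 on success and
 * -EBUSY if the page could not be split.
 */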
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

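/*
 * Split the huge PMD mapping @__address in @__vma, but only if the
 * entry is a swap/migration entry, a transparent huge PMD, or a
 * devmap PMD; otherwise this is a no-op.
 */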
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

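/* A PMD that is neither none nor present holds a swap or migration entry. */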
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
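/*
 * Typical usage (an illustrative sketch):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd under the lock ...
 *		spin_unlock(ptl);
 *	}
 *
 * A NULL return means the pmd is not huge and the caller should fall
 * back to the pte path.
 */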
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

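/* No PUD-sized huge zero page is implemented, so this is always false. */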
static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support
 * THP_SWAP due to implementation limitations (e.g. arm64 with MTE)
 * can override this to return false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
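/*
 * Example override (a sketch of the arm64 approach; see
 * arch/arm64/include/asm/pgtable.h for the authoritative version):
 *
 *	#define arch_thp_swp_supported arch_thp_swp_supported
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 */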

#endif /* _LINUX_HUGE_MM_H */