xref: /linux-6.15/include/linux/huge_mm.h (revision 82d00a93)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn using the vma's default page protection. See
 * vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
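
/*
 * Example: a device ->huge_fault() handler mapping a PMD-sized region
 * (illustrative sketch only; my_dev_huge_fault(), struct my_dev and
 * my_base_phys are hypothetical and not part of this header).  The PUD
 * variant below is used the same way:
 *
 *	static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *		pfn_t pfn;
 *
 *		if (!transhuge_vma_suitable(vmf->vma, haddr))
 *			return VM_FAULT_FALLBACK;
 *
 *		pfn = phys_to_pfn_t(dev->my_base_phys, PFN_DEV | PFN_MAP);
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */
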
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn using the vma's default page protection. See
 * vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;
	/*
	 * For dax vmas, try to always use hugepage mappings. If the kernel
	 * does not support hugepages, fsdax mappings will fall back to
	 * PAGE_SIZE mappings, and device-dax namespaces, which try to
	 * guarantee a given mapping size, will fail to enable.
	 */
	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
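
/*
 * Example: the usual fault-path check before installing a PMD-sized page
 * (illustrative sketch only, not a helper exported by this header).  The
 * faulting address must be rounded down to a huge page boundary first, so
 * the whole range haddr .. haddr + HPAGE_PMD_SIZE is tested, not just the
 * faulting byte:
 *
 *	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *
 *	if (__transparent_hugepage_enabled(vmf->vma) &&
 *	    transhuge_vma_suitable(vmf->vma, haddr))
 *		return do_huge_pmd_anonymous_page(vmf);
 *	return VM_FAULT_FALLBACK;
 */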

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
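
/*
 * Example: splitting a huge PMD before operating on a range at base-page
 * granularity (illustrative sketch only; vma, pmd and addr stand for
 * values the caller already holds, with mmap_sem taken):
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	// *pmd is now either none or a regular page table; the macro is a
 *	// no-op unless the entry was huge, devmap or a huge swap entry.
 */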
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
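
/*
 * Example: the canonical locking pattern (illustrative sketch only).  A
 * non-NULL return means the PMD was huge (or a huge swap/devmap entry) and
 * is now stable under the returned lock, which the caller must drop:
 *
 *	spinlock_t *ptl;
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		// operate on the huge entry; *pmd cannot change here
 *		spin_unlock(ptl);
 *	} else {
 *		// not huge: fall back to the pte-level walk
 *	}
 */
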
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
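
/*
 * Example: get/put pairing for the huge zero page (illustrative sketch
 * only).  A successful mm_get_huge_zero_page() pins the shared zero page
 * for this mm; the reference is dropped via mm_put_huge_zero_page(),
 * normally from the mm teardown path:
 *
 *	struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *
 *	if (!zero_page)
 *		return VM_FAULT_FALLBACK;	// allocation failed
 *	// ... map zero_page read-only at PMD granularity ...
 *	// later, on mm exit: mm_put_huge_zero_page(mm);
 */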

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or per-memcg deferred list lives in the second tail
	 * page: the corresponding space in the first tail page is occupied
	 * by compound_head.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline int hpage_nr_pages(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */