xref: /linux-6.15/include/linux/huge_mm.h (revision 3c8d7ef8)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
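/*
 * Worked example (editor's note, assuming x86-64 with 4KiB base pages;
 * not part of the original header): PAGE_SHIFT = 12, PMD_SHIFT = 21 and
 * PUD_SHIFT = 30, so HPAGE_PMD_ORDER = 9, HPAGE_PMD_NR = 512,
 * HPAGE_PMD_SIZE = 2MiB and HPAGE_PUD_SIZE = 1GiB. Other architectures
 * and page sizes derive different values from the same macros.
 */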

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);
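/*
 * Usage sketch (editor's note, not part of the original header): the
 * anonymous fault path, which already knows its VMA can host THP, may
 * gate a huge fault on the lighter helper, roughly:
 *
 *	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma))
 *		ret = create_huge_pmd(&vmf);
 *
 * create_huge_pmd() stands in for the fault path's own helper here.
 * Callers that cannot make that assumption should use
 * transparent_hugepage_enabled(), which performs the full check.
 */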

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
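/*
 * Editor's note (not part of the original header): split_huge_page() and
 * split_huge_page_to_list() expect the caller to hold a reference on the
 * page and to have it locked; they return 0 when the compound page was
 * split into base pages and a negative error (e.g. -EBUSY for a pinned
 * page) when it could not be split.
 */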

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
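/*
 * Usage sketch (editor's note, not part of the original header): code
 * that must walk or modify individual ptes in a range that may be mapped
 * by a huge pmd typically splits it first, roughly:
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	... walk the ptes as usual ...
 *
 * The macro is a no-op when *pmd is neither a swap, trans-huge nor
 * devmap entry, so callers do not need to check beforehand.
 */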


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
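/*
 * Usage sketch (editor's note, not part of the original header): callers
 * take the lock conditionally and handle the huge case only when a lock
 * is returned, falling back to the pte level otherwise, roughly:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... *pmd is a stable huge/swap/devmap entry here ...
 *		spin_unlock(ptl);
 *		return 0;
 *	}
 *	... not (or no longer) huge: walk the ptes as usual ...
 *
 * mmap_sem must already be held, as noted above.
 */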
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */