/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
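
/*
 * Worked example (illustrative; assumes 4 KiB base pages and
 * PMD_ORDER == 9, as on x86-64 with 2 MiB PMD mappings):
 *
 *	BIT(PMD_ORDER + 1) - 1	== 0x3ff	orders 0..9 set
 *	& ~(BIT(0) | BIT(1))	== 0x3fc	orders 2..9 set
 *
 * i.e. anonymous THP may use any folio order from 2 up to PMD_ORDER.
 */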

/*
 * Mask of all large folio orders supported for file THP.
 */
#define THP_ORDERS_ALL_FILE	(BIT(PMD_ORDER) | BIT(PUD_ORDER))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)

#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
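
/*
 * Usage sketch (illustrative; not taken from a caller in this tree). To
 * test a single order, e.g. whether a PMD-sized THP is allowed while
 * handling a page fault on @vma:
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		...
 *
 * The !! turns the returned order bitfield into a boolean.
 */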

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
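
/*
 * Worked values (illustrative; assumes x86-64 defaults of PAGE_SHIFT == 12,
 * PMD_SHIFT == 21 and PUD_SHIFT == 30):
 *
 *	HPAGE_PMD_ORDER == 9	HPAGE_PMD_NR == 512	HPAGE_PMD_SIZE == 2 MiB
 *	HPAGE_PUD_ORDER == 18	HPAGE_PUD_NR == 262144	HPAGE_PUD_SIZE == 1 GiB
 *
 * The MASK macros round an address down to the containing huge page
 * boundary, e.g. haddr = addr & HPAGE_PMD_MASK.
 */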

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
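
/*
 * Iteration sketch (illustrative; "orders" and "order" are hypothetical
 * locals). These two helpers walk an order bitfield from highest to
 * lowest set bit:
 *
 *	int order = highest_order(orders);
 *
 *	while (orders) {
 *		...try this order...
 *		order = next_order(&orders, order);
 *	}
 *
 * next_order() clears the previous bit, so the loop ends once the
 * bitfield is empty; highest_order(0) is -1 since fls_long(0) == 0.
 */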

/*
 * Perform the following checks:
 *   - For file vmas, check that the linear page offset of the vma is
 *     order-aligned within the file. The hugepage is guaranteed to be
 *     order-aligned within the file, but we must check that the
 *     order-aligned addresses in the VMA map to order-aligned offsets
 *     within the file; otherwise the hugepage will not be mappable.
 *   - For all vmas, check that the hugepage-aligned area containing
 *     addr lies entirely within the vma.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
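
/*
 * Worked example (illustrative numbers): for order == 9 with 4 KiB pages,
 * hpage_size is 2 MiB. A file vma with vm_start == 0x200000 and
 * vm_pgoff == 0 passes the alignment check (page 0x200 minus pgoff 0 is a
 * multiple of 512 pages); vm_pgoff == 1 would fail, because 2 MiB-aligned
 * addresses would then map to misaligned file offsets.
 */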

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}
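
/*
 * Usage sketch (illustrative; modelled on an anonymous fault path, with
 * hypothetical locals). A caller typically intersects the allowed orders
 * with the orders that fit the fault address, then tries each in turn:
 *
 *	unsigned long orders;
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *					  THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 *	// then walk with highest_order()/next_order(), falling back to
 *	// smaller orders (and finally order 0) on allocation failure
 */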

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	MTHP_STAT_SHMEM_ALLOC,
	MTHP_STAT_SHMEM_FALLBACK,
	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
	MTHP_STAT_SPLIT,
	MTHP_STAT_SPLIT_FAILED,
	MTHP_STAT_SPLIT_DEFERRED,
	__MTHP_STAT_COUNT
};

struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

#ifdef CONFIG_SYSFS
DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_inc(mthp_stats.stats[order][item]);
}
#else
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif
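
/*
 * Usage sketch (illustrative): callers bump the per-CPU counter for the
 * order they just acted on, e.g. after successfully faulting in an
 * order-4 anonymous folio:
 *
 *	count_mthp_stat(4, MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * Order 0 and orders above PMD_ORDER are silently ignored.
 */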

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
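
/*
 * Note: the macro checks the entry first, so split_huge_pmd() is safe to
 * call on any pmd; entries that are not huge (or swap/devmap) are left
 * untouched.
 */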

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
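
/*
 * Locking sketch (illustrative; "ptl" is a hypothetical local). Take the
 * lock, treat NULL as "not a huge entry (any more)", and unlock when done:
 *
 *	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);
 *
 *	if (ptl) {
 *		...the pmd is a huge/swap/devmap entry, stable under ptl...
 *		spin_unlock(ptl);
 *	}
 */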

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
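
/*
 * mm_get_huge_zero_folio() and mm_put_huge_zero_folio() take and drop a
 * per-mm reference on the shared huge zero folio, which is allocated
 * lazily and can be reclaimed once all references are gone.
 */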

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze, struct folio *folio);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze, struct folio *folio) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
#define split_folio(f) split_folio_to_order(f, 0)
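
/*
 * The two wrappers above split all the way to order 0 (individual base
 * pages); split_folio_to_order() can instead split to an intermediate
 * large-folio order.
 */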

#endif /* _LINUX_HUGE_MM_H */