xref: /linux-6.15/include/linux/khugepaged.h (revision 2bc7ea71)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_KHUGEPAGED_H
3 #define _LINUX_KHUGEPAGED_H
4 
5 #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
6 
7 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* sysfs attribute group for khugepaged tunables (registered by THP init code) */
extern struct attribute_group khugepaged_attr_group;

/* Start/stop the khugepaged kernel thread; int returns are 0 or -errno. */
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
/* Whether @vma (with prospective @vm_flags) is eligible for THP collapse. */
extern bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags);
/* Register/unregister an mm with khugepaged's scan list (slow paths). */
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
/* Register @vma's mm if the VMA becomes THP-eligible (e.g. after mprotect). */
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
				 unsigned long vm_flags);
/* Recalculate khugepaged's reserve after min_free_kbytes changes. */
extern void khugepaged_min_free_kbytes_update(void);
#ifdef CONFIG_SHMEM
/*
 * Collapse the PTE-mapped huge page containing @addr in @mm — presumably
 * re-establishing a PMD mapping for a file/shmem THP (implemented in
 * mm/khugepaged.c; confirm exact semantics there).
 */
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
/* No shmem support: nothing to collapse, keep callers unconditional. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif
28 
/*
 * Mode tests against the global transparent_hugepage_flags bitmask.
 * khugepaged should run when THP is enabled either system-wide ("always")
 * or for madvise(MADV_HUGEPAGE) regions only.
 */
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
/* "always" mode: any eligible VMA may be collapsed. */
#define khugepaged_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
/* "madvise" mode: only VMAs that requested huge pages via madvise. */
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
/* Whether khugepaged may perform defrag (direct compaction) for its allocations. */
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
42 
43 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
44 {
45 	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
46 		__khugepaged_enter(mm);
47 }
48 
49 static inline void khugepaged_exit(struct mm_struct *mm)
50 {
51 	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
52 		__khugepaged_exit(mm);
53 }
54 
55 static inline void khugepaged_enter(struct vm_area_struct *vma,
56 				   unsigned long vm_flags)
57 {
58 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
59 	    khugepaged_enabled()) {
60 		if (hugepage_vma_check(vma, vm_flags))
61 			__khugepaged_enter(vma->vm_mm);
62 	}
63 }
64 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* THP disabled: fork needs no khugepaged bookkeeping. */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
/* THP disabled: nothing to unregister at mm teardown. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
/* THP disabled: VMAs are never registered with khugepaged. */
static inline void khugepaged_enter(struct vm_area_struct *vma,
				    unsigned long vm_flags)
{
}
/* THP disabled: no-op counterpart of the extern khugepaged_enter_vma(). */
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					unsigned long vm_flags)
{
}
/* THP disabled: there are no PTE-mapped huge pages to collapse. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
83 
/* THP disabled: khugepaged holds no reserve to recalculate. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}
87 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
88 
89 #endif /* _LINUX_KHUGEPAGED_H */
90