/* xref: /linux-6.15/include/linux/huge_mm.h (revision 56d06fa2) */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
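/*
 * Usage sketch (illustrative only, not taken from this tree): a DAX-style
 * driver's huge fault handler would typically resolve the pfn backing the
 * faulting address and hand it to vmf_insert_pfn_pmd(); the pfn and
 * fault-flag plumbing below is a hypothetical example:
 *
 *	ret = vmf_insert_pfn_pmd(vma, addr & HPAGE_PMD_MASK, pmd, pfn,
 *				 flags & FAULT_FLAG_WRITE);
 */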
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
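/*
 * These enum values are bit numbers within transparent_hugepage_flags.
 * A minimal sketch of how callers test them (the helper macros below use
 * exactly this pattern):
 *
 *	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
 *		... THP is enabled system-wide ...
 */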

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
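/*
 * Worked example, assuming x86-64 with 4KiB base pages (PAGE_SHIFT = 12,
 * PMD_SHIFT = 21): HPAGE_PMD_SHIFT = 21, HPAGE_PMD_SIZE = 2MiB,
 * HPAGE_PMD_MASK = ~0x1fffff, HPAGE_PMD_ORDER = 9, and HPAGE_PMD_NR = 512
 * base pages per huge page. Other architectures and page sizes give
 * different values.
 */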

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
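/*
 * Illustrative use (a sketch, not a quote from this tree): fault and
 * khugepaged paths check this per VMA before trying to set up a
 * PMD-sized mapping:
 *
 *	if (transparent_hugepage_enabled(vma))
 *		... attempt a huge page, else fall back to base pages ...
 */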
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
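/*
 * Split usage sketch (hedged; the authoritative locking rules live in
 * mm/huge_memory.c): callers are expected to hold a reference and the
 * page lock, and must cope with failure by leaving the page huge:
 *
 *	lock_page(page);
 *	if (PageTransHuge(page) && split_huge_page(page))
 *		... split failed, page is still a compound huge page ...
 *	unlock_page(page);
 */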
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false);			\
	} while (0)
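/*
 * A hedged usage sketch: paths that need to touch the range with PTE
 * granularity (mprotect, mremap, partial munmap and similar) call
 *
 *	split_huge_pmd(vma, pmd, address);
 *
 * which replaces the huge PMD with a page table of normal PTEs mapping
 * the same pages; it does not split the compound page itself (that is
 * split_huge_page()'s job).
 */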

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
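/*
 * Usage pattern (mirrors callers such as the huge-PMD walkers for
 * mincore/madvise): take the PMD lock only when the entry really is
 * huge, and treat a NULL return as "fall back to the PTE path":
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... *pmd is a stable huge (or devmap) entry here ...
 *		spin_unlock(ptl);
 *		return;
 *	}
 *	... walk the PTEs instead ...
 */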
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);
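/*
 * The huge zero page is refcounted; a hedged sketch of the expected
 * discipline (see mm/huge_memory.c for the authoritative rules): pair a
 * successful get with a put once the mapping that used it is gone, and
 * handle allocation failure:
 *
 *	zero_page = get_huge_zero_page();
 *	if (!zero_page)
 *		... fall back to allocating a real huge page ...
 *	...
 *	put_huge_zero_page();
 */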

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void put_huge_zero_page(void)
{
	BUILD_BUG();
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */