xref: /linux-6.15/include/linux/huge_mm.h (revision 8ed7cf66)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
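
/*
 * Illustrative worked value (an assumption for the example, not a definition
 * in this header): with 4 KiB pages and PMD_SHIFT == 21, as on x86-64,
 * PMD_ORDER is 9, so:
 *
 *	THP_ORDERS_ALL_ANON == (BIT(10) - 1) & ~(BIT(0) | BIT(1)) == 0x3fc
 *
 * i.e. orders 2 through 9 inclusive are candidate anonymous THP sizes.
 */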

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply
 * to them.  The same holds for PFNMAPs, where there is neither a struct
 * page nor a page cache.
 */
#define THP_ORDERS_ALL_SPECIAL		\
	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT	\
	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL	\
	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)

#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
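
/*
 * Illustrative check (a sketch, not a caller in this header): a page fault
 * path could ask whether one specific order is permitted, e.g.:
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		... try to install a PMD-sized mapping ...
 */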

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
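
/*
 * Illustrative worked values (assuming x86-64 with 4 KiB pages, where
 * PAGE_SHIFT == 12, PMD_SHIFT == 21 and PUD_SHIFT == 30; not definitions in
 * this header):
 *
 *	HPAGE_PMD_ORDER == 9,  HPAGE_PMD_NR == 512,    HPAGE_PMD_SIZE == 2 MiB
 *	HPAGE_PUD_ORDER == 18, HPAGE_PUD_NR == 262144, HPAGE_PUD_SIZE == 1 GiB
 */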

enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	MTHP_STAT_SHMEM_ALLOC,
	MTHP_STAT_SHMEM_FALLBACK,
	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
	MTHP_STAT_SPLIT,
	MTHP_STAT_SPLIT_FAILED,
	MTHP_STAT_SPLIT_DEFERRED,
	MTHP_STAT_NR_ANON,
	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
	__MTHP_STAT_COUNT
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_add(mthp_stats.stats[order][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	mod_mthp_stat(order, item, 1);
}

#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif
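
/*
 * Illustrative use (a sketch, not a call site here): a caller that has just
 * mapped an order-4 anonymous folio could bump the matching per-CPU counter
 * with:
 *
 *	count_mthp_stat(4, MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * mod_mthp_stat() ignores orders outside 1..PMD_ORDER, and both helpers are
 * no-ops without CONFIG_TRANSPARENT_HUGEPAGE and CONFIG_SYSFS.
 */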

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
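
/*
 * Illustrative iteration (a sketch of a typical caller pattern, not required
 * by this header): walk a bitmask of orders from largest to smallest, e.g.:
 *
 *	int order = highest_order(orders);
 *
 *	while (orders) {
 *		... try "order" ...
 *		order = next_order(&orders, order);
 *	}
 */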

/*
 * Perform the following checks:
 *   - For a file-backed VMA, check that the VMA's linear page offset is
 *     order-aligned within the file.  The hugepage is guaranteed to be
 *     order-aligned within the file, but we must check that order-aligned
 *     addresses in the VMA map to order-aligned offsets within the file;
 *     otherwise the hugepage will not be mappable.
 *   - For all VMAs, check that haddr falls within an aligned hugepage
 *     area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
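
/*
 * Illustrative worked example (hypothetical values, assuming 4 KiB pages and
 * order == 9): a file-backed VMA with vm_start == 0x200000 and vm_pgoff == 512
 * maps 2 MiB-aligned addresses to 2 MiB-aligned file offsets, so the alignment
 * check passes ((512 - 512) is a multiple of 512).  With vm_pgoff == 511 the
 * difference is 1, the check fails, and a PMD-sized folio cannot be mapped
 * there (assuming the 2 MiB range would otherwise fit inside the VMA).
 */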

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: Which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}
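
/*
 * Illustrative use (a sketch of how a caller might combine these helpers; the
 * local variable names are hypothetical): intersect the configured anonymous
 * THP orders with those that fit at the fault address, e.g.:
 *
 *	unsigned long orders;
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *					  THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, addr, orders);
 */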

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	/*
	 * split_huge_page() locks the page before splitting and
	 * expects the same page that has been split to be locked when
	 * returned. split_folio(page_folio(page)) cannot be used here
	 * because it converts the page to folio and passes the head
	 * page to be split.
	 */
	return split_huge_page_to_list_to_order(page, NULL, ret);
}
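
/*
 * Illustrative use (a sketch, not a call site here): on a locked folio, e.g.:
 *
 *	ret = split_folio(folio);
 *	if (ret)
 *		... the folio could not be split, e.g. due to extra pins ...
 */
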
void deferred_split_folio(struct folio *folio, bool partially_mapped);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pud_t *pudp, unsigned long addr, pgprot_t newprot,
		unsigned long cp_flags) { return 0; }
#endif

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
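
/*
 * Illustrative locking pattern (a sketch of how a caller might use the helper
 * above; "ptl" is a hypothetical local): with mmap_lock held, the PMD lock is
 * taken only when the entry is huge (or a swap/devmap entry), e.g.:
 *
 *	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);
 *
 *	if (ptl) {
 *		... operate on the huge PMD ...
 *		spin_unlock(ptl);
 *	}
 */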

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze, struct folio *folio);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}

static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
	return 0;
}

static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze, struct folio *folio) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}

static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
				    unsigned long address)
{
}

static inline int change_huge_pud(struct mmu_gather *tlb,
				  struct vm_area_struct *vma, pud_t *pudp,
				  unsigned long addr, pgprot_t newprot,
				  unsigned long cp_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#endif /* _LINUX_HUGE_MM_H */