/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page has more metadata to save in its struct pages than the
 * head struct page alone can hold, so we have to reuse fields of the tail
 * struct pages to store it.  To avoid conflicts caused by subsequent use
 * of more tail struct pages, we gather these discrete indexes of tail
 * struct pages here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	SUBPAGE_INDEX_HWPOISON,
#endif
	__NR_USED_SUBPAGE,
};
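
/*
 * Illustrative sketch (not part of the kernel API): each index above names
 * the tail struct page whose ->private field carries that piece of state,
 * relative to the head page.  The real accessors appear later in this
 * header (hugetlb_page_subpool() and friends); this hypothetical helper
 * only shows the addressing scheme, assuming @hpage is a head page:
 *
 *	static unsigned long hugetlb_read_subpage_private(struct page *hpage,
 *							  unsigned int idx)
 *	{
 *		// idx is one of the SUBPAGE_INDEX_* values above
 *		return page_private(hpage + idx);
 *	}
 */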

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
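
/*
 * A minimal sketch of the [from, to) arithmetic described above (an assumed
 * example, not kernel code): summing (to - from) over a resv_map's region
 * list yields the number of huge pages tracked.
 *
 *	static long count_region_pages(struct resv_map *resv)
 *	{
 *		struct file_region *rg;
 *		long pages = 0;
 *
 *		// resv->lock must be held by the caller
 *		list_for_each_entry(rg, &resv->regions, link)
 *			pages += rg->to - rg->from;	// e.g. [0, 4) adds 4
 *		return pages;
 *	}
 */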

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
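
/*
 * Hedged usage sketch for for_each_hstate() (illustrative only): walk every
 * registered huge page size.  hstates[] and the geometry helpers used here
 * are declared further down in this header.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu bytes per huge page\n",
 *			h->name, huge_page_size(h));
 */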

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
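
/*
 * Sketch of the fault-mutex pattern (assumed usage, mirroring callers such
 * as the hugetlb fault path): hash the (mapping, index) pair to pick one
 * mutex from the table, then hold it to serialize faults on that index.
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	// ... fault in / instantiate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */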

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define these.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to the page, i.e. after allocation but before
 *	use, or when the page is being freed.
 * HPG_migratable  - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by the raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
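
/*
 * Illustrative sketch (assumed usage, not taken from a specific caller):
 * the HPAGEFLAG() invocations above generate, for example,
 * HPageMigratable()/SetHPageMigratable()/ClearHPageMigratable(), which
 * operate on bits of the head page's page.private.
 *
 *	// After the new huge page is added to the page cache/page tables:
 *	SetHPageMigratable(page);
 *
 *	// In migration code, under hugetlb_lock per the rules above:
 *	if (HPageMigratable(page))
 *		ClearHPageMigratable(page);
 */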

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
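
/*
 * Worked example of the geometry helpers above (values assume a 2 MB huge
 * page on a 4 KB PAGE_SIZE system, i.e. an hstate with order == 9):
 *
 *	huge_page_order(h)	== 9
 *	huge_page_size(h)	== 4096 << 9 == 2097152 bytes (2 MB)
 *	huge_page_shift(h)	== 9 + 12 == 21
 *	huge_page_mask(h)	== ~(2 MB - 1), masks down to a 2 MB boundary
 *	pages_per_huge_page(h)	== 1 << 9 == 512 base pages
 *	blocks_per_huge_page(h)	== 2097152 / 512 == 4096 sectors
 */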

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is distinct from the migration check: it determines
 * whether a huge page should be placed in a movable zone.  Movability only
 * needs to be considered if the huge page size supports migration at all;
 * there is no reason for a huge page to be movable if it is not migratable
 * to start with.  The huge page size must also be one that can sit in a
 * movable zone while remaining feasible to migrate; mere presence in a
 * movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
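
/*
 * Hedged allocation-path sketch (an assumed caller, not a specific kernel
 * function): base the gfp mask on the hstate's own movability-aware mask,
 * keeping only the __GFP_THISNODE/__GFP_NOWARN hints from the incoming
 * @gfp_mask, exactly as htlb_modify_alloc_mask() permits.
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, gfp_mask);
 *	struct page *page;
 *
 *	page = alloc_huge_page_nodemask(h, preferred_nid, nmask, gfp);
 */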

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
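
/*
 * Sketch of the guard this macro enables (illustrative only; real callers
 * include the hugetlb init/sysfs paths): bail out early when the platform
 * reports no huge page support.
 *
 *	if (!hugepages_supported())
 *		return 0;	// nothing to set up on this platform
 */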

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
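
/*
 * Hedged sketch of the modify_prot protocol above (an assumed caller shape,
 * modeled on the protection-change path): start clears the entry and
 * returns the old pte, commit installs the new one; the huge pte lock is
 * assumed held throughout.
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);	// arch/asm-generic helper
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */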

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
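
/*
 * Typical lock/inspect/unlock pattern around huge_pte_lock() (sketch only;
 * the variable names are illustrative):
 *
 *	spinlock_t *ptl;
 *	pte_t entry;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	if (!huge_pte_none(entry)) {
 *		// operate on the mapped huge page
 *	}
 *	spin_unlock(ptl);
 */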

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */