/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to reuse tail struct pages to
 * store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
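
/*
 * Example (a sketch, not part of this header): counting the pages covered
 * by a resv_map by walking its regions under the resv_map lock. "resv" is
 * a hypothetical pointer used only for illustration.
 *
 *	struct file_region *rg;
 *	long nr_pages = 0;
 *
 *	spin_lock(&resv->lock);
 *	list_for_each_entry(rg, &resv->regions, link)
 *		nr_pages += rg->to - rg->from;	// [from, to) covers to - from pages
 *	spin_unlock(&resv->lock);
 */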

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
			    struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
			   struct vm_area_struct *vma,
			   vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
			       bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
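
/*
 * Example (a sketch): serializing a hugetlb fault path on one file offset.
 * The hash selects one mutex out of the table; "mapping" and "idx" are
 * hypothetical values used only for illustration.
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	// ... fault in or instantiate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */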

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);
bool hugetlbfs_pagecache_present(struct hstate *h,
				 struct vm_area_struct *vma,
				 unsigned long address);

struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without
 * pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not call this function directly; it is
 * only a common interface for implementing the arch-specific walkers.
 * Please use hugetlb_walk() instead, because that will attempt to verify
 * the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * the high-level pgtable pages, but also the PUD entries that can be
 * unshared concurrently for VM_SHARED), the caller of this function is
 * responsible for its thread safety. One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us! It can be done by a pmd
 *     unshare with a follow-up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from the pmd
 * sharing pov, until the vma lock is released. Option (2.2) doesn't protect
 * against a concurrent pmd unshare, but it makes sure the pgtable page is
 * safe to access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
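
/*
 * Example (a sketch): a safe lookup on a shared mapping following rule (2.1)
 * above -- hold the hugetlb vma lock across the walk so the PUD-ranged
 * pgtable page cannot be unshared under us. "vma", "addr" and "h" are
 * hypothetical values used only for illustration.
 *
 *	pte_t *ptep;
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *	if (ptep) {
 *		// ... the pte is stable until the vma lock is dropped ...
 *	}
 *	hugetlb_vma_unlock_read(vma);
 */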
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
					  unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
				unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
			      struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
				     unsigned long *start, unsigned long *end)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
				   struct zap_details *details)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_end(vma, details);
}
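
/*
 * Example (a sketch, assuming a caller shaped like the unmap_vmas() path):
 * a zap path brackets the actual unmap with the helpers above, so that
 * __hugetlb_zap_begin() can widen [start, end) for pmd sharing and take the
 * locks that __hugetlb_zap_end() later releases. "vma", "start", "end" and
 * "details" are hypothetical values used only for illustration.
 *
 *	hugetlb_zap_begin(vma, &start, &end);
 *	// ... zap the page tables in [start, end) ...
 *	hugetlb_zap_end(vma, details);
 */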

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
			       unsigned long address, unsigned long end,
			       pgprot_t newprot, unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_folio_mapping_lock_write(
							struct folio *folio)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
				struct vm_area_struct *vma,
				struct zap_details *details)
{
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
				     unsigned long sz)
{
	return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					     bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
				      struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
					  struct vm_area_struct *vma, unsigned long start,
					  unsigned long end, struct page *ref_page,
					  zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
				       struct vm_area_struct *vma, unsigned long address,
				       unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(const struct file *file)
{
	return file->f_op->fop_flags & FOP_HUGE_PAGES;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		   int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
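
/*
 * Example (a sketch): creating an unlinked hugetlbfs file for an anonymous
 * MAP_HUGETLB-style mapping. The flag extraction shown here is illustrative
 * only; "len" and "flags" are hypothetical values.
 *
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */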

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page. i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);	\
	}

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)			\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
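
/*
 * Example (a sketch): each HPAGEFLAG() invocation above generates a
 * test/set/clear triple named after the lowercase flag name, e.g. for
 * HPG_temporary:
 *
 *	if (folio_test_hugetlb_temporary(folio))
 *		// will be freed straight back to the buddy allocator
 *
 *	folio_set_hugetlb_temporary(folio);
 *	folio_clear_hugetlb_temporary(folio);
 *
 * Per the synchronization notes above, examination and modification
 * generally require hugetlb_lock unless the caller holds the only
 * reference to the folio.
 */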

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	struct lock_class_key resize_key;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				  unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
					   nodemask_t *nmask, gfp_t gfp_mask,
					   bool allow_alloc_fallback);
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
					  nodemask_t *nmask, gfp_t gfp_mask);

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			      pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			      unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					     struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_PAGE_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	/* Number of 512-byte blocks per huge page. */
	return huge_page_size(h) / 512;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
				struct address_space *mapping, pgoff_t idx)
{
	return filemap_lock_folio(mapping, idx << huge_page_order(h));
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugetlb_flags
static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}
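
/*
 * Example (a sketch): the geometry helpers above all derive from
 * hstate->order. For a hypothetical 2 MiB hstate on a 4 KiB base page
 * configuration (order == 9, PAGE_SHIFT == 12):
 *
 *	huge_page_size(h)      == 4096UL << 9 == 2 MiB
 *	huge_page_shift(h)     == 9 + 12      == 21
 *	pages_per_huge_page(h) == 1 << 9      == 512
 */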
static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

int dissolve_free_hugetlb_folio(struct folio *folio);
int dissolve_free_hugetlb_folios(unsigned long start_pfn,
				 unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PUD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it determines
 * whether a huge page should be placed in a movable zone. Movability needs
 * to be checked only if the huge page size supports migration at all; there
 * is no reason for a huge page to be movable if it is not migratable to
 * begin with. The huge page must also be small enough that migrating it out
 * of a movable zone remains feasible; mere presence in a movable zone does
 * not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	gfp_t gfp = __GFP_COMP | __GFP_NOWARN;

	gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;

	return gfp;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
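
/*
 * Example (a sketch, assuming a migration-target allocation path): build
 * the gfp mask from the hstate's base mask while preserving the caller's
 * node restriction, then allocate with the fallback policy decided by
 * htlb_allow_alloc_fallback() (defined below). "h", "gfp_mask", "nid",
 * "nmask" and "reason" are hypothetical values used only for illustration.
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, gfp_mask);
 *	struct folio *folio;
 *
 *	folio = alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp,
 *					     htlb_allow_alloc_fallback(reason));
 */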

static inline bool htlb_allow_alloc_fallback(int reason)
{
	bool allowed_fallback = false;

	/*
	 * Note: memory offline, memory failure and the migration syscalls
	 * are allowed to fall back to other nodes for lack of a better
	 * choice, even though that might break the per-node hugetlb pool.
	 * Other cases set __GFP_THISNODE to avoid breaking the per-node
	 * hugetlb pool.
	 */
	switch (reason) {
	case MR_MEMORY_HOTPLUG:
	case MR_MEMORY_FAILURE:
	case MR_SYSCALL:
	case MR_MEMPOLICY_MBIND:
		allowed_fallback = true;
		break;
	default:
		break;
	}

	return allowed_fallback;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	const unsigned long size = huge_page_size(h);

	VM_WARN_ON(size == PAGE_SIZE);

	/*
	 * hugetlb must use the exact same PT locks as core-mm page table
	 * walkers would. When modifying a PTE table, hugetlb must take the
	 * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
	 * PT lock etc.
	 *
	 * The expectation is that any hugetlb folio smaller than a PMD is
	 * always mapped into a single PTE table and that any hugetlb folio
	 * smaller than a PUD (but at least as big as a PMD) is always mapped
	 * into a single PMD table.
	 *
	 * If that does not hold for an architecture, then that architecture
	 * must disable split PT locks such that all *_lockptr() functions
	 * will give us the same result: the per-MM PT lock.
	 *
	 * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
	 * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
	 * and core-mm would use pmd_lockptr(). However, in such configurations
	 * split PMD locks are disabled -- they don't make sense on a single
	 * PGDIR page table -- and the end result is the same.
	 */
	if (size >= PUD_SIZE)
		return pud_lockptr(mm, (pud_t *) pte);
	else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
		return pmd_lockptr(mm, (pmd_t *) pte);
	/* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
	return ptep_lockptr(mm, pte);
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time. Some
 * of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no such
 * support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);
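
/*
 * Example (a sketch): huge_pte_lockptr() is normally used via
 * huge_pte_lock() (defined near the end of this header), which returns the
 * lock already taken; the caller pairs it with spin_unlock(). "h", "mm"
 * and "ptep" are hypothetical values used only for illustration.
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	// ... read or modify the huge PTE under the PT lock ...
 *	spin_unlock(ptl);
 */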

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
				struct address_space *mapping, pgoff_t idx)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
						unsigned long addr,
						int avoid_reserve)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
			    nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
			     nodemask_t *nmask, gfp_t gfp_mask,
			     bool allow_alloc_fallback)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
	return 0;
}

static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline bool htlb_allow_alloc_fallback(int reason)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
	return ptep_get(ptep);
#else
	return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}

static inline bool hugetlbfs_pagecache_present(
	struct hstate *h, struct vm_area_struct *vma, unsigned long address)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Safe version of huge_pte_offset() that checks the locks. See the comment
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
	 * If pmd sharing is possible, locking is needed to safely walk the
	 * hugetlb pgtables. More information can be found at the comment
	 * above huge_pte_offset() in the same file.
	 *
	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
	 */
	if (__vma_shareable_lock(vma))
		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
			     !lockdep_is_held(
				 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

#endif /* _LINUX_HUGETLB_H */