/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both alloced and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
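
/*
 * Illustrative sketch only (not part of this header's API): with the
 * [from, to) semantics documented above, the huge pages covered by a
 * resv_map's region list could be summed as follows, assuming the
 * resv_map lock is already held and resv is a caller-provided
 * struct resv_map pointer (hypothetical name):
 *
 *      struct file_region *rg;
 *      long nr_hpages = 0;
 *
 *      list_for_each_entry(rg, &resv->regions, link)
 *              nr_hpages += rg->to - rg->from;
 *
 * A single region [0, 4) contributes 4 huge pages, matching the
 * arithmetic in the comment above.
 */
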
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
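
/*
 * Illustrative sketch of how the fault mutex table is meant to be used
 * (the real users live in mm/hugetlb.c and fs/hugetlbfs/inode.c); mapping
 * and idx are assumed to identify the faulting page:
 *
 *      u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... instantiate or look up the page for (mapping, idx) ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
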
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
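
/*
 * Illustrative fault-path sketch (loosely modelled on hugetlb_fault();
 * error handling omitted): look up the huge PTE slot for an address and
 * allocate it if not yet present.  mm, h and haddr stand in for the
 * caller's mm, hstate and huge-page-aligned fault address:
 *
 *      pte_t *ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 *
 *      if (!ptep)
 *              ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
 */
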
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
                                        pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **pagep)
{
        BUG();
        return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If the arch supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);
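
/*
 * Illustrative sketch of creating an unlinked hugetlbfs file, roughly what
 * anonymous MAP_HUGETLB mappings do (see mm/mmap.c for the real call);
 * len, user and page_size_log stand in for caller-supplied values:
 *
 *      struct file *file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
 *                                             VM_NORESERVE, &user,
 *                                             HUGETLB_ANONHUGE_INODE,
 *                                             page_size_log);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 */
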
static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[7];
        struct cftype cgroup_files_legacy[9];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
                                     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
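
/*
 * Worked example of the geometry helpers above, assuming a 2 MB huge page
 * on an architecture with 4 KB base pages (PAGE_SHIFT == 12):
 *
 *      huge_page_order(h)       ==   9
 *      huge_page_shift(h)       ==  21
 *      huge_page_size(h)        ==   2 MB   (4096 << 9)
 *      pages_per_huge_page(h)   == 512
 *      blocks_per_huge_page(h)  == 4096     (512-byte blocks)
 */
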
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in a
 * movable zone. Movability of any huge page should be required
 * only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in a movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it's not
 * feasible to migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}
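
/*
 * Illustrative locking sketch: callers usually take the lock through the
 * huge_pte_lock() wrapper further below rather than pairing
 * huge_pte_lockptr() with spin_lock() by hand.  h, mm and ptep stand in
 * for the caller's hstate, mm and huge PTE pointer:
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *      ... examine or update the huge PTE at ptep ...
 *      spin_unlock(ptl);
 */
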
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
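
/*
 * Illustrative protection-change sketch (a simplification of what
 * hugetlb_change_protection() in mm/hugetlb.c does; TLB flushing and
 * migration-entry handling are omitted).  vma, addr, ptep and newprot
 * stand in for the caller's context:
 *
 *      pte_t old_pte, new_pte;
 *
 *      old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *      new_pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *      huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */
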
#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */