#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example, a4fe3ce76 "powerpc/mm: Allow
 * more flexible layouts for hugepage pagetables" introduced this on
 * powerpc, allowing a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif


#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes
				 * both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to
				 * satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
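/*
 * Note on the unmap family above: unmap_hugepage_range() sets up its
 * own mmu_gather, while the __unmap_hugepage_range*() variants expect
 * the caller to supply one; __unmap_hugepage_range_final() is for the
 * final teardown of a VMA and additionally clears VM_MAYSHARE so page
 * tables are not shared while the VMA is going away. A minimal sketch
 * for a caller that already holds a gather (tlb and vma are assumed
 * to come from the surrounding exit/unmap path):
 *
 *	__unmap_hugepage_range_final(tlb, vma, vma->vm_start,
 *				     vma->vm_end, NULL);
 */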
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
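/*
 * With !CONFIG_HUGETLB_PAGE no hugetlb VMA can ever exist, so the
 * BUG() stubs here document that reaching these paths is a kernel
 * bug rather than a condition callers are expected to handle.
 */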
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t	mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
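	/*
	 * Geometry of this page size: huge_page_size() is
	 * PAGE_SIZE << order, and mask is ~(huge_page_size() - 1).
	 */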
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif
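/*
 * Worked example for the helpers above (assuming x86-64 with 4 KiB base
 * pages, i.e. PAGE_SHIFT == 12): the 2 MiB hstate has order == 9, so
 * huge_page_size() == 4 KiB << 9 == 2 MiB, huge_page_shift() == 21,
 * pages_per_huge_page() == 512 and blocks_per_huge_page() ==
 * 2 MiB / 512 == 4096 sectors.
 */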
static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}
static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

/* Take and return the page table lock protecting @pte. */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#endif /* _LINUX_HUGETLB_H */