/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGTABLE_H
#define _LINUX_PGTABLE_H

#include <linux/pfn.h>
#include <asm/pgtable.h>

#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#define PUD_ORDER	(PUD_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE. However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

/*
 * This defines the first usable user address. Platforms
 * can override its value with a custom FIRST_USER_ADDRESS
 * defined in their respective <asm/pgtable.h>.
 */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif

/*
 * This defines the generic helper for accessing the PMD page
 * table page. Platforms can still override it via their
 * respective <asm/pgtable.h>.
 */
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

#define pmd_folio(pmd) page_folio(pmd_page(pmd))

/*
 * A page table page can be thought of as an array like this:
 * pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of the
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */
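
/*
 * Worked example (illustrative; assumes x86-64 4K defaults, i.e.
 * PAGE_SHIFT == 12, PMD_SHIFT == 21, PTRS_PER_PTE == PTRS_PER_PMD == 512):
 * for addr == 0x7f1234567000, pte_index() extracts bits 12..20 and
 * pmd_index() extracts bits 21..29 of the address:
 *
 *	pte_index(addr) == (0x7f1234567000 >> 12) & 511 == 0x167
 *	pmd_index(addr) == (0x7f1234567000 >> 21) & 511 == 0x1a2
 */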
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif

#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif

#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif

#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif

#ifdef CONFIG_HIGHPTE
#define __pte_map(pmd, address) \
	((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
#define pte_unmap(pte)	do {	\
	kunmap_local((pte));	\
	rcu_read_unlock();	\
} while (0)
#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline void pte_unmap(pte_t *pte)
{
	rcu_read_unlock();
}
#endif

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);

/* Find an entry in the second-level page table. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif

static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
	return (pgd + pgd_index(address));
}

/*
 * a shortcut to get a pgd_t in a given mm
 */
#ifndef pgd_offset
#define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
#endif

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address)		pgd_offset(&init_mm, (address))

/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in a user or kernel page table, or translate a
 * virtual address to the pointer to the PTE in the kernel page tables, with
 * these simple helpers.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
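
/*
 * For reference, a minimal sketch of the full walk that these shortcuts
 * collapse (illustrative only; assumes the mapping is already known to
 * exist at PTE level, so the intermediate levels are not checked):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */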
#ifndef pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return 0;
}
#endif

#ifndef pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return 0;
}
#endif

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

#ifndef pte_batch_hint
/**
 * pte_batch_hint - Number of pages that can be added to batch without scanning.
 * @ptep: Page table pointer for the entry.
 * @pte: Page table entry.
 *
 * Some architectures know that a set of contiguous ptes all map the same
 * contiguous memory with the same permissions. In this case, it can provide a
 * hint to aid pte batching without the core code needing to scan every pte.
 *
 * An architecture implementation may ignore the PTE accessed state. Further,
 * the dirty state must apply atomically to all the PTEs described by the hint.
 *
 * May be overridden by the architecture, else pte_batch_hint is always 1.
 */
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	return 1;
}
#endif

#ifndef pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#endif

#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
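
/*
 * Sketch of how a scanner might consume pte_batch_hint() (illustrative;
 * the real consumers live in mm/). The hint lets the loop skip forward in
 * steps larger than one entry on architectures with contiguous-PTE hints:
 *
 *	unsigned int step;
 *
 *	for (i = 0; i < max_nr; i += step, ptep += step) {
 *		pte_t pte = ptep_get(ptep);
 *
 *		step = pte_batch_hint(ptep, pte);
 *		...
 *	}
 */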
#ifndef set_ptes
/**
 * set_ptes - Map consecutive pages to a contiguous range of addresses.
 * @mm: Address space to map the pages into.
 * @addr: Address to map the first page at.
 * @ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @nr: Number of pages to map.
 *
 * When nr==1, initial state of pte may be present or not present, and new state
 * may be present or not present. When nr>1, initial state of all ptes must be
 * not present, and new state must be present.
 *
 * May be overridden by the architecture, or the architecture can define
 * set_pte() and PFN_PTE_SHIFT.
 *
 * Context: The caller holds the page table lock. The pages all belong
 * to the same folio. The PTEs are all in the same PMD.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);

	arch_enter_lazy_mmu_mode();
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
	arch_leave_lazy_mmu_mode();
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
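
/*
 * Example (illustrative): installing nr pages of one folio, starting at
 * pfn, under the page table lock; the names here are placeholders:
 *
 *	pte_t pte = pfn_pte(pfn, vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
 */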
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

#ifndef pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}
#endif

#ifndef pudp_get
static inline pud_t pudp_get(pud_t *pudp)
{
	return READ_ONCE(*pudp);
}
#endif

#ifndef p4dp_get
static inline p4d_t p4dp_get(p4d_t *p4dp)
{
	return READ_ONCE(*p4dp);
}
#endif

#ifndef pgdp_get
static inline pgd_t pgdp_get(pgd_t *pgdp)
{
	return READ_ONCE(*pgdp);
}
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	int r = 1;

	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;

	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Although this API is relevant to THP only, it is called from generic
 * rmap code under PageTransHuge(), hence it needs a dummy implementation
 * for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef arch_has_hw_nonleaf_pmd_young
/*
 * Return whether the accessed bit in non-leaf PMD entries is supported on the
 * local CPU.
 */
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
}
#endif

#ifndef arch_has_hw_pte_young
/*
 * Return whether the accessed bit is supported on the local CPU.
 *
 * This stub assumes accessing through an old PTE triggers a page fault.
 * Architectures that automatically set the accessed bit should override it.
 */
static inline bool arch_has_hw_pte_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif
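
/*
 * Sketch of reclaim-style aging built on these helpers (illustrative;
 * the real logic lives in mm/vmscan.c and mm/rmap.c):
 *
 *	int referenced = 0;
 *
 *	...for each mapping of the folio...
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		referenced++;
 */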
#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
					 pte_t pte)
{
}
#endif

#ifndef arch_check_zapped_pmd
static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
					 pmd_t pmd)
{
}
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	pte_clear(mm, address, ptep);
	page_table_check_pte_clear(mm, pte);
	return pte;
}
#endif

static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	ptep_get_and_clear(mm, addr, ptep);
}

#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
 * For walking the pagetables without holding any locks. Some architectures
 * (eg x86-32 PAE) cannot load the entries atomically without using expensive
 * instructions. We are guaranteed that a PTE will only either go from not
 * present to present, or present to not present -- it will not switch to a
 * completely different present page without a TLB flush in between; which we
 * are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
 * We load pte_high *after* loading pte_low, which ensures we don't see an older
 * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
 * picked up a changed pte high. We might have gotten rubbish values from
 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
 * operates on present ptes we're safe.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#define ptep_get_lockless ptep_get_lockless

#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = pmdp->pmd_low;
		smp_rmb();
		pmd.pmd_high = pmdp->pmd_high;
		smp_rmb();
	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));

	return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */

/*
 * We require that the PTE can be read atomically.
 */
#ifndef ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif

#ifndef pmdp_get_lockless
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	return pmdp_get(pmdp);
}
static inline void pmdp_get_lockless_sync(void)
{
}
#endif
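
/*
 * Sketch of the lockless-read pattern these helpers enable (illustrative;
 * GUP-fast in mm/gup.c is the canonical user): read the PTE without the
 * page table lock, do the speculative work, then verify that the entry
 * did not change underneath us before committing:
 *
 *	pte_t pte = ptep_get_lockless(ptep);
 *
 *	...speculatively grab a reference on the page...
 *	if (unlikely(!pte_same(pte, ptep_get(ptep))))
 *		goto bail;	...the PTE changed, undo and retry...
 */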
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	page_table_check_pud_clear(mm, pud);

	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pmd_t *pmdp,
						 int full)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pud_t *pudp,
						 int full)
{
	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	return ptep_get_and_clear(mm, address, ptep);
}
#endif

#ifndef get_and_clear_full_ptes
/**
 * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
 *			     the same folio, collecting dirty/accessed bits.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
 * returned PTE.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
#endif

#ifndef clear_full_ptes
/**
 * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
 *		     folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_get_and_clear_full().
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		ptep_get_and_clear_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif
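
/*
 * Example (illustrative): zapping a fully mapped folio range and
 * propagating the bits collected across the whole batch:
 *
 *	pte_t pte = get_and_clear_full_ptes(mm, addr, ptep, nr, full);
 *
 *	if (pte_dirty(pte))
 *		folio_mark_dirty(folio);
 *	if (pte_young(pte))
 *		folio_mark_accessed(folio);
 */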
/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, it can update the local TLB here to avoid
 * the next page fault. This function updates the TLB only, and touches
 * nothing else; that is what distinguishes it from update_mmu_cache().
 */
#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
static inline void update_mmu_tlb(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
}
#define __HAVE_ARCH_UPDATE_MMU_TLB
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef clear_not_present_full_ptes
/**
 * clear_not_present_full_ptes - Clear multiple not present PTEs which are
 *				 consecutive in the pgtable.
 * @mm: Address space the ptes represent.
 * @addr: Address of the first pte.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over pte_clear_not_present_full().
 *
 * Context: The caller holds the page table lock. The PTEs are all not present.
 * The PTEs are all in the same PMD.
 */
static inline void clear_not_present_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		pte_clear_not_present_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pud_t *pudp);
#endif

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	return pte_mkwrite_novma(pte);
}
#endif

#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	return pmd_mkwrite_novma(pmd);
}
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = ptep_get(ptep);

	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef wrprotect_ptes
/**
 * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
 *		    folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to write-protect.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_set_wrprotect().
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		ptep_set_wrprotect(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif
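
/*
 * Example (illustrative): fork-style copy-on-write setup write-protects a
 * whole batch of source PTEs in one call before sharing them with the
 * child:
 *
 *	wrprotect_ptes(src_mm, addr, src_ptep, nr);
 */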
/*
 * On some architectures the hardware does not set the page accessed bit when
 * a page is accessed; it is the responsibility of software to set it. Tracking
 * the accessed bit then costs extra page faults, so as an optimization these
 * architectures can set the bit throughout the page fault path. In contrast
 * to the pte_mkyoung macro, this one is only used on platforms where software
 * maintains the page accessed bit.
 */
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung	pte_sw_mkyoung
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;

	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif
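
/*
 * Sketch of the deposit/withdraw lifecycle (illustrative): when a huge PMD
 * is installed, a preallocated PTE table is deposited with the PMD so that
 * a later split cannot fail for lack of memory; splitting withdraws it:
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...the huge mapping is later split...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 */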
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for an
 * architecture that doesn't have hardware dirty/accessed bits. In this case we
 * can't race with the CPU setting those bits, so a non-atomic approach is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;

	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD

/*
 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
 * hugepage mapping in the page tables. This function is similar to
 * pmdp_invalidate(), but should only be used if the access and dirty bits
 * would not be cleared by the software in the new PMD value. The function
 * ensures that hardware updates of the access and dirty bits are not lost.
 *
 * On certain architectures this allows a TLB flush to be avoided in most
 * cases. Another TLB flush might still be necessary later if the PMD update
 * itself requires one (e.g., if protection was made stricter). Even when a
 * TLB flush is needed because of the update, the caller may be able to batch
 * these TLB flushing operations, so that fewer of them are needed overall.
 */
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif

#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#endif

#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#define pud_same pud_same
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif
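
/*
 * Example (illustrative): a GUP-fast style path folds the present and
 * writability checks into one call:
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;	...cannot use this mapping, fall back...
 */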
/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_pte_safe(ptep, pte) \
({ \
	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
	set_pte(ptep, pte); \
})

#define set_pmd_safe(pmdp, pmd) \
({ \
	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
	set_pmd(pmdp, pmd); \
})

#define set_pud_safe(pudp, pud) \
({ \
	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
	set_pud(pudp, pud); \
})

#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
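
/*
 * Example (illustrative): boot code that re-populates a mapping with
 * entries it knows are either new or identical can document that no TLB
 * flush is needed by using the _safe variant:
 *
 *	set_pmd_safe(pmdp, pmd);	...warns if it would change a live entry...
 */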
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{

}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_unmap_one() can save this
 * metadata on a swap-out of a page.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
#endif

/*
 * Allow architectures to preserve additional metadata associated with
 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
 * prototypes must be defined in the arch-specific asm/pgtable.h file.
 */
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct folio *folio)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}

static inline void arch_swap_invalidate_area(int type)
{
}
#endif

#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, a rounded-up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d)	do { } while (0)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud)	do { } while (0)
#endif

void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
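
/*
 * Sketch of the canonical range-walk loop built on pXd_addr_end() and the
 * *_none_or_clear_bad() helpers (illustrative; mm/pagewalk.c is the real
 * implementation):
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		...process the PTEs covering [addr, next)...
 *	} while (pmd++, addr = next, addr != end);
 */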
static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
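
/*
 * Example (illustrative): changing protections without losing concurrent
 * hardware accessed/dirty updates, as change_protection() style code does:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 */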
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value. Defined here
 * because these macros can be used even if CONFIG_MMU is not defined.
 */

#ifndef pgprot_nx
#define pgprot_nx(prot)	(prot)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_mhp
#define pgprot_mhp(prot)	(prot)
#endif

#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
#endif /* CONFIG_MMU */

#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired. This is for sanity of maintaining and reasoning about the
 * kernel code. In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
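
/*
 * Example (illustrative): a clear_refs-style reset of soft-dirty state
 * write-protects the PTE and clears the soft-dirty bit, so the next write
 * both faults and re-marks the page:
 *
 *	ptent = pte_wrprotect(ptent);
 *	ptent = pte_clear_soft_dirty(ptent);
 */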
#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * memory type of pfn mappings specified by the remap_pfn_range,
 * vmf_insert_pfn.
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vmf_insert_pfn().
 */
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				    pfn_t pfn)
{
}

/*
 * track_pfn_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size,
			       bool mm_wr_locked)
{
}

/*
 * untrack_pfn_clear is called while mremapping a pfnmap for a new region,
 * or when a pgtable copy fails while duplicating a vm area.
 */
static inline void untrack_pfn_clear(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size, bool mm_wr_locked);
extern void untrack_pfn_clear(struct vm_area_struct *vma);
#endif

#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;

	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;

	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;

	return zero_pfn;
}
#endif
#else
static inline int is_zero_pfn(unsigned long pfn)
{
	return 0;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MMU */
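
/*
 * Example (illustrative): fault and GUP paths special-case the shared
 * zero page, which backs reads of untouched anonymous memory:
 *
 *	if (is_zero_pfn(pte_pfn(pte)))
 *		...no real page to pin or dirty here...
 */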
#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
	!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
	return 0;
}
#endif

static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	pud_t pudval = READ_ONCE(*pud);

	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
		return 1;
	if (unlikely(pud_bad(pudval))) {
		pud_clear_bad(pud);
		return 1;
	}
#endif
	return 0;
}

#ifndef CONFIG_NUMA_BALANCING
/*
 * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
 * perfectly valid to indicate "no" in that case, which is why our default
 * implementation defaults to "always no".
 *
 * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
 * page protection due to NUMA hinting. NUMA hinting faults only apply in
 * accessible VMAs.
 *
 * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
 * looking at the VMA accessibility is sufficient.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
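
/*
 * Sketch (illustrative): an ioremap/vmalloc mapping path tries to install
 * a PMD-sized leaf and falls back to individual PTEs if the architecture
 * declines (pmd_set_huge() returning 0):
 *
 *	if (size >= PMD_SIZE && pmd_set_huge(pmd, phys, prot))
 *		return 0;
 *	...otherwise map the range with PTEs...
 */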
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Architectures with special requirements for evicting THP backing TLB
 * entries can implement this. Otherwise it can also help optimize the
 * normal TLB flush in the THP regime: a stock flush_tlb_range() typically
 * has an optimization to nuke the entire TLB when the flush span exceeds
 * a threshold, which will likely be true for a single huge page. A single
 * THP flush would then invalidate the entire TLB, which is not desirable.
 * See e.g. flush_pmd_tlb_range() in arch/arc.
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

extern void __init pgtable_cache_init(void);

#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
 * because they really don't support them, or the port needs to be updated to
 * reflect the required functionality. Below are a set of relatively safe
 * fallbacks, as best effort, which we can count on in lieu of the architectures
 * not defining them on their own yet.
 */

#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() set of functions and in the generic
 * vmalloc/ioremap code to track at which page-table levels entries have been
 * modified. Based on that, the code can better decide when vmalloc and ioremap
 * mapping changes need to be synchronized to other page-tables in the system.
 */
#define	__PGTBL_PGD_MODIFIED	0
#define	__PGTBL_P4D_MODIFIED	1
#define	__PGTBL_PUD_MODIFIED	2
#define	__PGTBL_PMD_MODIFIED	3
#define	__PGTBL_PTE_MODIFIED	4

#define	PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
#define	PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
#define	PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
#define	PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
#define	PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)

/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
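
/*
 * Example (illustrative): a helper that populates a missing PMD records
 * the level it touched, so the caller knows whether the change must be
 * synchronized to other page tables via arch_sync_kernel_mappings():
 *
 *	pgtbl_mod_mask mask = 0;
 *
 *	...allocate and install a PTE table under the PMD...
 *	mask |= PGTBL_PMD_MODIFIED;
 */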
#endif /* !__ASSEMBLY__ */

#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
 * ZSMALLOC needs to know the highest PFN on 32-bit architectures
 * with physical address space extension, but falls back to
 * BITS_PER_LONG otherwise.
 */
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif

#ifndef has_transparent_hugepage
#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif

#ifndef has_transparent_pud_hugepage
#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
#endif

/*
 * On some architectures it depends on the mm if the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
#endif

#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
#ifndef pud_offset_lockless
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
#endif
#ifndef pmd_offset_lockless
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
#endif

/*
 * pXd_leaf() is the API to check whether a pgtable entry is a huge page
 * mapping. It should work globally across all archs, without any
 * dependency on CONFIG_* options. For architectures that do not support
 * huge mappings on specific levels, the fallbacks below will be used.
 *
 * A leaf pgtable entry should always imply the following:
 *
 * - It is a "present" entry. IOW, before using this API, please check it
 *   with pXd_present() first. NOTE: it may not always mean the "present
 *   bit" is set. For example, PROT_NONE entries are always "present".
 *
 * - It should _never_ be a swap entry of any type. Above "present" check
 *   should have guarded this, but let's be crystal clear on this.
 *
 * - It should contain a huge PFN, which points to a huge page larger than
 *   PAGE_SIZE of the platform. The PFN format isn't important here.
 *
 * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
 *   pXd_devmap(), or hugetlb mappings).
 */
#ifndef pgd_leaf
#define pgd_leaf(x)	false
#endif
#ifndef p4d_leaf
#define p4d_leaf(x)	false
#endif
#ifndef pud_leaf
#define pud_leaf(x)	false
#endif
#ifndef pmd_leaf
#define pmd_leaf(x)	false
#endif

#ifndef pgd_leaf_size
#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
#endif
#ifndef p4d_leaf_size
#define p4d_leaf_size(x) P4D_SIZE
#endif
#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif

/*
 * We always define pmd_pfn for all archs as it's used in lots of generic
 * code. Now it happens too for pud_pfn (and can happen for larger
 * mappings too in the future; we're not there yet). Instead of defining
 * it for all archs (like pmd_pfn), provide a fallback.
 *
 * Note that returning 0 here means any arch that didn't define this can
 * get severely wrong when it hits a real pud leaf. It's the arch's
 * responsibility to properly define it when a huge pud is possible.
 */
#ifndef pud_pfn
#define pud_pfn(x) 0
#endif
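
/*
 * Example (illustrative): a generic page-table walker stops descending
 * when it meets a leaf and derives the mapping size from the level:
 *
 *	if (pud_leaf(pud)) {
 *		size = pud_leaf_size(pud);
 *		...handle the huge mapping, do not descend...
 *	}
 */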
/*
 * Some architectures have MMUs that are configurable or selectable at boot
 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
 * helps to have a static maximum value.
 */

#ifndef MAX_PTRS_PER_PTE
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
#endif

#ifndef MAX_PTRS_PER_PMD
#define MAX_PTRS_PER_PMD PTRS_PER_PMD
#endif

#ifndef MAX_PTRS_PER_PUD
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
#endif

#ifndef MAX_PTRS_PER_P4D
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
 * MAP_PRIVATE (with Enhanced PAN supported):
 *								r: (no) no
 *								w: (no) no
 *								x: (yes) yes
 */
#define DECLARE_VM_GET_PAGE_PROT					\
pgprot_t vm_get_page_prot(unsigned long vm_flags)			\
{									\
		return protection_map[vm_flags &			\
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
}									\
EXPORT_SYMBOL(vm_get_page_prot);

#endif /* _LINUX_PGTABLE_H */