/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGTABLE_H
#define _LINUX_PGTABLE_H

#include <linux/pfn.h>
#include <asm/pgtable.h>

#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#define PUD_ORDER	(PUD_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE.  However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

/*
 * This defines the first usable user address. Platforms
 * can override its value with a custom FIRST_USER_ADDRESS
 * defined in their respective <asm/pgtable.h>.
 */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif

/*
 * This defines the generic helper for accessing the PMD page
 * table page. Platforms can still override this via their
 * respective <asm/pgtable.h>.
 */
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

#define pmd_folio(pmd) page_folio(pmd_page(pmd))

/*
 * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of the
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */
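
/*
 * Illustrative sketch (documentation only, hypothetical address "addr"):
 * each of the index helpers below simply selects the bit-field of a virtual
 * address that indexes an entry at the corresponding level, e.g.:
 *
 *	pgd_index(addr)	-> index into the PGD page
 *	pud_index(addr)	-> index into the PUD page (0 if the level is folded)
 *	pmd_index(addr)	-> index into the PMD page (0 if the level is folded)
 *	pte_index(addr)	-> index into the PTE page
 */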

static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif

#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif

#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif

#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif

#ifdef CONFIG_HIGHPTE
#define __pte_map(pmd, address) \
	((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
#define pte_unmap(pte)	do {	\
	kunmap_local((pte));	\
	rcu_read_unlock();	\
} while (0)
#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline void pte_unmap(pte_t *pte)
{
	rcu_read_unlock();
}
#endif

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);

/* Find an entry in the second-level page table. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif

static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
	return (pgd + pgd_index(address));
}

/*
 * a shortcut to get a pgd_t in a given mm
 */
#ifndef pgd_offset
#define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
#endif

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address)		pgd_offset(&init_mm, (address))

/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in a user or kernel page table, or translate a
 * virtual address to the pointer to the PTE in the kernel page tables, with
 * these simple helpers.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}

#ifndef pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return 0;
}
#endif

#ifndef pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return 0;
}
#endif
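
/*
 * Illustrative sketch (documentation only, hypothetical caller): a full
 * manual walk to the PTE of a kernel virtual address, which is roughly what
 * pmd_off_k() plus pte_offset_kernel() (and thus virt_to_kpte()) do:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step assumes the previous level entry is present; real callers must
 * check p?d_none()/p?d_bad() (or use p?d_none_or_clear_bad()) first.
 */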

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

#ifndef pte_batch_hint
/**
 * pte_batch_hint - Number of pages that can be added to batch without scanning.
 * @ptep: Page table pointer for the entry.
 * @pte: Page table entry.
 *
 * Some architectures know that a set of contiguous ptes all map the same
 * contiguous memory with the same permissions. In this case, they can provide
 * a hint to aid pte batching without the core code needing to scan every pte.
 *
 * An architecture implementation may ignore the PTE accessed state. Further,
 * the dirty state must apply atomically to all the PTEs described by the hint.
 *
 * May be overridden by the architecture, else pte_batch_hint is always 1.
 */
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	return 1;
}
#endif

#ifndef pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#endif

#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)

#ifndef set_ptes
/**
 * set_ptes - Map consecutive pages to a contiguous range of addresses.
 * @mm: Address space to map the pages into.
 * @addr: Address to map the first page at.
 * @ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @nr: Number of pages to map.
 *
 * When nr==1, the initial state of the pte may be present or not present, and
 * the new state may be present or not present. When nr>1, the initial state of
 * all ptes must be not present, and the new state must be present.
 *
 * May be overridden by the architecture, or the architecture can define
 * set_pte() and PFN_PTE_SHIFT.
 *
 * Context: The caller holds the page table lock.  The pages all belong
 * to the same folio.  The PTEs are all in the same PMD.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);

	arch_enter_lazy_mmu_mode();
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
	arch_leave_lazy_mmu_mode();
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

#ifndef pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}
#endif

#ifndef pudp_get
static inline pud_t pudp_get(pud_t *pudp)
{
	return READ_ONCE(*pudp);
}
#endif

#ifndef p4dp_get
static inline p4d_t p4dp_get(p4d_t *p4dp)
{
	return READ_ONCE(*p4dp);
}
#endif

#ifndef pgdp_get
static inline pgd_t pgdp_get(pgd_t *pgdp)
{
	return READ_ONCE(*pgdp);
}
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
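
/*
 * Illustrative sketch (documentation only, hypothetical caller): page aging
 * typically tests and clears the accessed bit through these helpers while
 * holding the PTE lock, e.g. in an rmap walk:
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		referenced = true;
 *
 * Architectures without a suitable atomic primitive fall back to the generic
 * version above, which rewrites the PTE with pte_mkold().
 */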

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Despite being relevant to THP only, this API is called from generic rmap
 * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef arch_has_hw_nonleaf_pmd_young
/*
 * Return whether the accessed bit in non-leaf PMD entries is supported on the
 * local CPU.
 */
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
}
#endif

#ifndef arch_has_hw_pte_young
/*
 * Return whether the accessed bit is supported on the local CPU.
 *
 * This stub assumes accessing through an old PTE triggers a page fault.
 * Architectures that automatically set the accessed bit should override it.
 */
static inline bool arch_has_hw_pte_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif

#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
					 pte_t pte)
{
}
#endif

#ifndef arch_check_zapped_pmd
static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
					 pmd_t pmd)
{
}
#endif

#ifndef arch_check_zapped_pud
static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
{
}
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	pte_clear(mm, address, ptep);
	page_table_check_pte_clear(mm, pte);
	return pte;
}
#endif

#ifndef clear_young_dirty_ptes
/**
 * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
 *		same folio as old/clean.
 * @vma: The VMA the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to mark old/clean.
 * @flags: Flags to modify the PTE batch semantics.
 *
 * May be overridden by the architecture; otherwise, implemented by
 * get_and_clear/modify/set for each pte in the range.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock.  The PTEs map consecutive
 * pages that belong to the same folio.  The PTEs are all in the same PMD.
 */
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		if (flags == CYDP_CLEAR_YOUNG)
			ptep_test_and_clear_young(vma, addr, ptep);
		else {
			pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
			if (flags & CYDP_CLEAR_YOUNG)
				pte = pte_mkold(pte);
			if (flags & CYDP_CLEAR_DIRTY)
				pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, addr, ptep, pte);
		}
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	ptep_get_and_clear(mm, addr, ptep);
}

#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
 * For walking the pagetables without holding any locks.  Some architectures
 * (e.g. x86-32 PAE) cannot load the entries atomically without using expensive
 * instructions.  We are guaranteed that a PTE will only either go from not
 * present to present, or present to not present -- it will not switch to a
 * completely different present page without a TLB flush in between; which we
 * are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
 * We load pte_high *after* loading pte_low, which ensures we don't see an older
 * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
 * picked up a changed pte high.  We might have gotten rubbish values from
 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
 * present bit set *unless* it is 'l'.  Because get_user_pages_fast() only
 * operates on present ptes we're safe.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#define ptep_get_lockless ptep_get_lockless

#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = pmdp->pmd_low;
		smp_rmb();
		pmd.pmd_high = pmdp->pmd_high;
		smp_rmb();
	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));

	return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */

/*
 * We require that the PTE can be read atomically.
 */
#ifndef ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif

#ifndef pmdp_get_lockless
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	return pmdp_get(pmdp);
}
static inline void pmdp_get_lockless_sync(void)
{
}
#endif
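
/*
 * Illustrative sketch (documentation only, hypothetical caller): a GUP-fast
 * style lockless lookup reads the PTE once without locks, operates on it,
 * and then re-reads it to detect concurrent changes:
 *
 *	pte_t pte = ptep_get_lockless(ptep);
 *
 *	if (!pte_present(pte))
 *		goto bail;
 *	... speculatively grab a reference on the mapped page ...
 *	if (unlikely(!pte_same(pte, ptep_get(ptep))))
 *		goto bail;	(the PTE changed under us, back off)
 *
 * pte_same() is defined later in this file.
 */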

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	page_table_check_pud_clear(mm, pud);

	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pmd_t *pmdp,
						 int full)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pud_t *pudp,
						 int full)
{
	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	return ptep_get_and_clear(mm, address, ptep);
}
#endif

#ifndef get_and_clear_full_ptes
/**
 * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
 *			     the same folio, collecting dirty/accessed bits.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
 * returned PTE.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock.  The PTEs map consecutive
 * pages that belong to the same folio.  The PTEs are all in the same PMD.
 */
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
#endif
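
/*
 * Illustrative sketch (documentation only, hypothetical caller): when zapping
 * a batch of PTEs that map one folio, the merged return value lets the caller
 * transfer dirty/accessed state to the folio in one step:
 *
 *	pte_t pte = get_and_clear_full_ptes(mm, addr, ptep, nr, full);
 *
 *	if (pte_dirty(pte))
 *		folio_mark_dirty(folio);
 *	if (pte_young(pte))
 *		folio_mark_accessed(folio);
 */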

#ifndef clear_full_ptes
/**
 * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
 *		     folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_get_and_clear_full().
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock.  The PTEs map consecutive
 * pages that belong to the same folio.  The PTEs are all in the same PMD.
 */
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		ptep_get_and_clear_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, the local TLB can be updated here to avoid
 * the next page fault. This function updates the TLB only; it touches
 * nothing else, which is what distinguishes it from update_mmu_cache().
 */
#ifndef update_mmu_tlb_range
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep, unsigned int nr)
{
}
#endif

static inline void update_mmu_tlb(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
	update_mmu_tlb_range(vma, address, ptep, 1);
}

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef clear_not_present_full_ptes
/**
 * clear_not_present_full_ptes - Clear multiple not present PTEs which are
 *				 consecutive in the pgtable.
 * @mm: Address space the ptes represent.
 * @addr: Address of the first pte.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over pte_clear_not_present_full().
 *
 * Context: The caller holds the page table lock.  The PTEs are all not
 * present.  The PTEs are all in the same PMD.
 */
static inline void clear_not_present_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		pte_clear_not_present_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pud_t *pudp);
#endif

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	return pte_mkwrite_novma(pte);
}
#endif

#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	return pmd_mkwrite_novma(pmd);
}
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = ptep_get(ptep);
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef wrprotect_ptes
/**
 * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
 *		    folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to write-protect.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_set_wrprotect().
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock.  The PTEs map consecutive
 * pages that belong to the same folio.  The PTEs are all in the same PMD.
 */
static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		ptep_set_wrprotect(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif
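
/*
 * Illustrative sketch (documentation only; src_mm, addr, src_ptep and nr are
 * hypothetical caller state): at fork time, the PTE-copy path can
 * write-protect a whole batch of PTEs of one folio in the parent before
 * copying them to the child, so the next write triggers copy-on-write:
 *
 *	wrprotect_ptes(src_mm, addr, src_ptep, nr);
 *
 * instead of calling ptep_set_wrprotect() once per page.
 */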

/*
 * On some architectures the hardware does not set the accessed bit when a
 * memory page is accessed; it is the responsibility of software to set it.
 * Tracking the accessed bit in software incurs an extra page fault penalty,
 * so as an optimization the bit can be set as part of the page fault path on
 * these architectures. To differentiate it from the pte_mkyoung() macro, this
 * macro is used on platforms where software maintains the accessed bit.
 */
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung	pte_sw_mkyoung
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for an
 * architecture that doesn't have hardware dirty/accessed bits. In this case we
 * can't race with the CPU setting these bits, so a non-atomic approach is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif
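
/*
 * Illustrative sketch (documentation only; vma, addr, pmdp and newprot are
 * hypothetical caller state): changing the protection of a mapped THP
 * typically uses the invalidate + re-establish pattern, with
 * pmdp_invalidate() declared just below:
 *
 *	pmd_t entry = pmdp_invalidate(vma, addr, pmdp);
 *
 *	entry = pmd_modify(entry, newprot);
 *	set_pmd_at(vma->vm_mm, addr, pmdp, entry);
 *
 * keeping the entry invalid while it is being modified, so that concurrent
 * hardware accessed/dirty updates cannot be lost.
 */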

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD

/*
 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
 * hugepage mapping in the page tables.  This function is similar to
 * pmdp_invalidate(), but should only be used if the access and dirty bits
 * would not be cleared by the software in the new PMD value.  The function
 * ensures that hardware changes to the access and dirty bits are not lost.
 *
 * Doing so allows certain architectures to avoid a TLB flush in most cases.
 * Another TLB flush might still be necessary later if the PMD update itself
 * requires such a flush (e.g., if protection was made stricter).  Even when a
 * TLB flush is needed because of the update, the caller may be able to batch
 * these TLB flushing operations, so fewer TLB flush operations are needed.
 */
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif

#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#endif

#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#define pud_same pud_same
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif

#ifndef __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr,
					pte_t pte, pte_t oldpte,
					int nr)
{

}
#else
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr,
					pte_t pte, pte_t oldpte,
					int nr)
{
	for (int i = 0; i < nr; i++) {
		arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
				pte_advance_pfn(pte, i),
				pte_advance_pfn(oldpte, i));
	}
}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_unmap_one() can save this
 * metadata on a swap-out of a page.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long addr,
				  pte_t orig_pte)
{
	return 0;
}
#endif

/*
 * Allow architectures to preserve additional metadata associated with
 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
 * prototypes must be defined in the arch-specific asm/pgtable.h file.
 */
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct folio *folio)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}

static inline void arch_swap_invalidate_area(int type)
{
}
#endif

#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
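
/*
 * Illustrative sketch (documentation only, hypothetical caller): the
 * *_addr_end() helpers are meant for range walks of the form used by the
 * generic page table walkers, e.g. at the PMD level:
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... operate on [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 *
 * pmd_none_or_clear_bad() is defined below.
 */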

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d)        do { } while (0)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud)        do { } while (0)
#endif

void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
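
/*
 * Illustrative sketch (documentation only; vma, addr, ptep and newprot are
 * hypothetical caller state): a protection change done as a transaction,
 * with the pte lock held:
 *
 *	old_pte = ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = pte_modify(old_pte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * Because the entry is non-present between start and commit, the hardware
 * cannot asynchronously update it while it is being modified, so no
 * accessed/dirty updates are lost.
 */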

#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value. Defined here
 * because these macros can be used even if CONFIG_MMU is not defined.
 */

#ifndef pgprot_nx
#define pgprot_nx(prot)	(prot)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_mhp
#define pgprot_mhp(prot)	(prot)
#endif

#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
#endif /* CONFIG_MMU */

#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vmf_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vmf_insert_pfn().
 */
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				    pfn_t pfn)
{
}

/*
 * track_pfn_copy is called when a vma that covers the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size,
			       bool mm_wr_locked)
{
}

/*
 * untrack_pfn_clear is called when mremapping a pfnmap to a new region,
 * or when copying the page tables fails while duplicating a vm area.
 */
static inline void untrack_pfn_clear(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size, bool mm_wr_locked);
extern void untrack_pfn_clear(struct vm_area_struct *vma);
#endif

#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif
#else
static inline int is_zero_pfn(unsigned long pfn)
{
	return 0;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
	!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
	return 0;
}
#endif

static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	pud_t pudval = READ_ONCE(*pud);

	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
		return 1;
	if (unlikely(pud_bad(pudval))) {
		pud_clear_bad(pud);
		return 1;
	}
#endif
	return 0;
}

#ifndef CONFIG_NUMA_BALANCING
/*
 * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
 * perfectly valid to indicate "no" in that case, which is why our default
 * implementation defaults to "always no".
 *
 * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
 * page protection due to NUMA hinting.  NUMA hinting faults only apply in
 * accessible VMAs.
 *
 * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
 * looking at the VMA accessibility is sufficient.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
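
/*
 * Illustrative sketch (documentation only, hypothetical caller): per the
 * comment above, a NUMA hinting fault is only indicated when the PTE is
 * PROT_NONE *and* the VMA itself is accessible:
 *
 *	if (pte_protnone(pte) && vma_is_accessible(vma))
 *		... handle as a NUMA hinting fault ...
 */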

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Architectures with special requirements for evicting THP backing TLB
 * entries can implement this. It can also help optimize the normal TLB
 * flush in the THP regime: a stock flush_tlb_range() typically has an
 * optimization to nuke the entire TLB if the flush span is greater than a
 * threshold, which will likely be true for a single huge page. Thus a single
 * THP flush will invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

extern void __init pgtable_cache_init(void);

#ifndef	__HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
 * because they really don't support them, or the port needs to be updated to
 * reflect the required functionality. Below is a set of relatively safe,
 * best-effort fallbacks which we can count on until those architectures
 * define the flags on their own.
 */

#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() set of functions and in the generic
 * vmalloc/ioremap code to track at which page-table levels entries have been
 * modified. Based on that, the code can better decide when vmalloc and ioremap
 * mapping changes need to be synchronized to other page-tables in the system.
 */
#define		__PGTBL_PGD_MODIFIED	0
#define		__PGTBL_P4D_MODIFIED	1
#define		__PGTBL_PUD_MODIFIED	2
#define		__PGTBL_PMD_MODIFIED	3
#define		__PGTBL_PTE_MODIFIED	4

#define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
#define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
#define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
#define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
#define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)

/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;

#endif /* !__ASSEMBLY__ */

#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
 * ZSMALLOC needs to know the highest PFN on 32-bit architectures
 * with physical address space extension, but falls back to
 * BITS_PER_LONG otherwise.
 */
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif

#ifndef has_transparent_hugepage
#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif

#ifndef has_transparent_pud_hugepage
#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
#endif
/*
 * On some architectures it depends on the mm if the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
#endif

#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
#ifndef pud_offset_lockless
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
#endif
#ifndef pmd_offset_lockless
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
#endif

/*
 * pXd_leaf() is the API to check whether a pgtable entry is a huge page
 * mapping.  It should work globally across all archs, without any
 * dependency on CONFIG_* options.  For architectures that do not support
 * huge mappings on specific levels, below fallbacks will be used.
 *
 * A leaf pgtable entry should always imply the following:
 *
 * - It is a "present" entry.  IOW, before using this API, please check it
 *   with pXd_present() first.  NOTE: it may not always mean the "present
 *   bit" is set.  For example, PROT_NONE entries are always "present".
 *
 * - It should _never_ be a swap entry of any type.  Above "present" check
 *   should have guarded this, but let's be crystal clear on this.
 *
 * - It should contain a huge PFN, which points to a huge page larger than
 *   PAGE_SIZE of the platform.  The PFN format isn't important here.
 *
 * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
 *   pXd_devmap(), or hugetlb mappings).
 */
#ifndef pgd_leaf
#define pgd_leaf(x)	false
#endif
#ifndef p4d_leaf
#define p4d_leaf(x)	false
#endif
#ifndef pud_leaf
#define pud_leaf(x)	false
#endif
#ifndef pmd_leaf
#define pmd_leaf(x)	false
#endif

#ifndef pgd_leaf_size
#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
#endif
#ifndef p4d_leaf_size
#define p4d_leaf_size(x) P4D_SIZE
#endif
#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef __pte_leaf_size
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif
#define __pte_leaf_size(x,y) pte_leaf_size(y)
#endif
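
/*
 * Illustrative sketch (documentation only, hypothetical caller): a walker
 * that wants to stop at huge mappings can combine the present and leaf
 * checks, and use the matching *_leaf_size() helper for the span covered:
 *
 *	pud_t pud = pudp_get(pudp);
 *
 *	if (pud_present(pud) && pud_leaf(pud))
 *		size = pud_leaf_size(pud);
 *
 * i.e. the whole range covered by the huge mapping at the PUD level.
 */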

/*
 * We always define pmd_pfn for all archs as it's used in lots of generic
 * code.  Now it happens too for pud_pfn (and can happen for larger
 * mappings too in the future; we're not there yet).  Instead of defining
 * it for all archs (like pmd_pfn), provide a fallback.
 *
 * Note that returning 0 here means any arch that didn't define this can
 * get severely wrong when it hits a real pud leaf.  It's the arch's
 * responsibility to properly define it when a huge pud is possible.
 */
#ifndef pud_pfn
#define pud_pfn(x) 0
#endif

/*
 * Some architectures have MMUs that are configurable or selectable at boot
 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
 * helps to have a static maximum value.
 */

#ifndef MAX_PTRS_PER_PTE
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
#endif

#ifndef MAX_PTRS_PER_PMD
#define MAX_PTRS_PER_PMD PTRS_PER_PMD
#endif

#ifndef MAX_PTRS_PER_PUD
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
#endif

#ifndef MAX_PTRS_PER_P4D
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif

#ifndef pte_pgprot
#define pte_pgprot(x) ((pgprot_t) {0})
#endif

#ifndef pmd_pgprot
#define pmd_pgprot(x) ((pgprot_t) {0})
#endif

#ifndef pud_pgprot
#define pud_pgprot(x) ((pgprot_t) {0})
#endif

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
 * MAP_PRIVATE (with Enhanced PAN supported):
 *								r: (no) no
 *								w: (no) no
 *								x: (yes) yes
 */
#define DECLARE_VM_GET_PAGE_PROT					\
pgprot_t vm_get_page_prot(unsigned long vm_flags)			\
{									\
		return protection_map[vm_flags &			\
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
}									\
EXPORT_SYMBOL(vm_get_page_prot);

#endif	/* _LINUX_PGTABLE_H */