Lines matching refs: vmf

Each entry gives the line number in the indexed file (the page-fault handling code in mm/memory.c), the matching source line, and the function it occurs in; declaration sites are additionally tagged "argument" or "local" depending on how vmf is defined there.

97 static vm_fault_t do_fault(struct vm_fault *vmf);
98 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
99 static bool vmf_pte_changed(struct vm_fault *vmf);
105 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) in vmf_orig_pte_uffd_wp() argument
107 if (!userfaultfd_wp(vmf->vma)) in vmf_orig_pte_uffd_wp()
109 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in vmf_orig_pte_uffd_wp()
112 return pte_marker_uffd_wp(vmf->orig_pte); in vmf_orig_pte_uffd_wp()
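
The matches above cover nearly the whole of vmf_orig_pte_uffd_wp(). Put back together it is the following small helper; the two early returns are filled in as an assumption, since lines that do not mention vmf are not listed:

	static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
	{
		if (!userfaultfd_wp(vmf->vma))
			return false;	/* VMA not registered for uffd write-protect */
		if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
			return false;	/* orig_pte was never sampled for this fault */

		return pte_marker_uffd_wp(vmf->orig_pte);
	}
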
2626 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, in vmf_insert_page_mkwrite() argument
2629 pgprot_t pgprot = vmf->vma->vm_page_prot; in vmf_insert_page_mkwrite()
2630 unsigned long addr = vmf->address; in vmf_insert_page_mkwrite()
2633 if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end) in vmf_insert_page_mkwrite()
2636 err = insert_page(vmf->vma, addr, page, pgprot, write); in vmf_insert_page_mkwrite()
3137 static inline int pte_unmap_same(struct vm_fault *vmf) in pte_unmap_same() argument
3142 spin_lock(vmf->ptl); in pte_unmap_same()
3143 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); in pte_unmap_same()
3144 spin_unlock(vmf->ptl); in pte_unmap_same()
3147 pte_unmap(vmf->pte); in pte_unmap_same()
3148 vmf->pte = NULL; in pte_unmap_same()
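
pte_unmap_same() is likewise almost fully visible in its matches. A sketch of the whole helper, with the config guard and the final return of `same` assumed (those lines do not reference vmf and so are not matched):

	static inline int pte_unmap_same(struct vm_fault *vmf)
	{
		int same = 1;
	#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
		if (sizeof(pte_t) > sizeof(unsigned long)) {
			spin_lock(vmf->ptl);
			same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
			spin_unlock(vmf->ptl);
		}
	#endif
		pte_unmap(vmf->pte);
		vmf->pte = NULL;	/* callers must not reuse the stale mapping */
		return same;
	}
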
3159 struct vm_fault *vmf) in __wp_page_copy_user() argument
3164 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user()
3166 unsigned long addr = vmf->address; in __wp_page_copy_user()
3188 vmf->pte = NULL; in __wp_page_copy_user()
3189 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { in __wp_page_copy_user()
3192 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3193 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
3198 if (vmf->pte) in __wp_page_copy_user()
3199 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3204 entry = pte_mkyoung(vmf->orig_pte); in __wp_page_copy_user()
3205 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
3206 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
3216 if (vmf->pte) in __wp_page_copy_user()
3220 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3221 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
3223 if (vmf->pte) in __wp_page_copy_user()
3224 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3247 if (vmf->pte) in __wp_page_copy_user()
3248 pte_unmap_unlock(vmf->pte, vmf->ptl); in __wp_page_copy_user()
3276 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) in do_page_mkwrite() argument
3279 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
3281 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
3283 if (vmf->vma->vm_file && in do_page_mkwrite()
3284 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3287 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3289 vmf->flags = old_flags; in do_page_mkwrite()
3309 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
3311 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
3313 struct folio *folio = page_folio(vmf->page); in fault_dirty_shared_page()
3343 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
3362 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) in wp_page_reuse() argument
3363 __releases(vmf->ptl) in wp_page_reuse()
3365 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
3368 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); in wp_page_reuse()
3369 VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte))); in wp_page_reuse()
3373 !PageAnonExclusive(vmf->page)); in wp_page_reuse()
3382 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3383 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
3385 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3386 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3387 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
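
The wp_page_reuse() matches show the reuse fast path of a write-protect fault: mark the existing PTE young and dirty and drop the lock. A sketch assembled from those lines; the cpupid reset, the maybe_mkwrite()/pte_mkdirty() step and the PGREUSE event count are assumptions:

	static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
		__releases(vmf->ptl)
	{
		struct vm_area_struct *vma = vmf->vma;
		pte_t entry;

		VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
		VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));

		if (folio) {
			VM_BUG_ON(folio_test_anon(folio) &&
				  !PageAnonExclusive(vmf->page));
			/* assumed: forget stale NUMA-hinting ownership of the folio */
			folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
		}

		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
		entry = pte_mkyoung(vmf->orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);		/* assumed */
		if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
			update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		count_vm_event(PGREUSE);				/* assumed */
	}
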
3396 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) in vmf_can_call_fault() argument
3398 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault()
3400 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
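
vmf_can_call_fault() decides whether ->fault()/->page_mkwrite() may be called while holding only the per-VMA lock. A sketch; the fallback tail that drops the VMA lock and asks for a retry is an assumption:

	static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;

		if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
			return 0;
		/* assumed: cannot call ->fault() under the VMA lock, retry under mmap_lock */
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}
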
3421 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf) in __vmf_anon_prepare() argument
3423 struct vm_area_struct *vma = vmf->vma; in __vmf_anon_prepare()
3428 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in __vmf_anon_prepare()
3434 if (vmf->flags & FAULT_FLAG_VMA_LOCK) in __vmf_anon_prepare()
3456 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
3458 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_page_copy()
3459 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
3471 if (vmf->page) in wp_page_copy()
3472 old_folio = page_folio(vmf->page); in wp_page_copy()
3473 ret = vmf_anon_prepare(vmf); in wp_page_copy()
3477 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); in wp_page_copy()
3478 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3485 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); in wp_page_copy()
3501 kmsan_copy_page_meta(&new_folio->page, vmf->page); in wp_page_copy()
3507 vmf->address & PAGE_MASK, in wp_page_copy()
3508 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
3514 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3515 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in wp_page_copy()
3522 ksm_might_unmap_zero_page(mm, vmf->orig_pte); in wp_page_copy()
3525 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3529 if (pte_soft_dirty(vmf->orig_pte)) in wp_page_copy()
3531 if (pte_uffd_wp(vmf->orig_pte)) in wp_page_copy()
3544 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3545 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); in wp_page_copy()
3548 set_pte_at(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
3549 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3573 folio_remove_rmap_pte(old_folio, vmf->page, vma); in wp_page_copy()
3579 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3580 } else if (vmf->pte) { in wp_page_copy()
3581 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3582 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3624 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) in finish_mkwrite_fault() argument
3626 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3627 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3628 &vmf->ptl); in finish_mkwrite_fault()
3629 if (!vmf->pte) in finish_mkwrite_fault()
3635 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { in finish_mkwrite_fault()
3636 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3637 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
3640 wp_page_reuse(vmf, folio); in finish_mkwrite_fault()
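
finish_mkwrite_fault() re-takes the PTE lock after ->page_mkwrite()/->pfn_mkwrite() and reuses the page only if nothing changed underneath. A sketch from the matches; both VM_FAULT_NOPAGE returns and the final return 0 are assumptions:

	static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
	{
		WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
					       &vmf->ptl);
		if (!vmf->pte)
			return VM_FAULT_NOPAGE;			/* assumed */
		/* we may have raced with another fault while the PTE was unmapped */
		if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
			update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return VM_FAULT_NOPAGE;			/* assumed */
		}
		wp_page_reuse(vmf, folio);
		return 0;					/* assumed */
	}
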
3648 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
3650 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3655 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3656 ret = vmf_can_call_fault(vmf); in wp_pfn_shared()
3660 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3661 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3664 return finish_mkwrite_fault(vmf, NULL); in wp_pfn_shared()
3666 wp_page_reuse(vmf, NULL); in wp_pfn_shared()
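
wp_pfn_shared() is the write-protect path for shared pfn mappings without a normal struct page. A sketch; the vm_ops->pfn_mkwrite presence check and the error-return filtering are assumptions:

	static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;

		if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {	/* assumed guard */
			vm_fault_t ret;

			pte_unmap_unlock(vmf->pte, vmf->ptl);
			ret = vmf_can_call_fault(vmf);
			if (ret)
				return ret;

			vmf->flags |= FAULT_FLAG_MKWRITE;
			ret = vma->vm_ops->pfn_mkwrite(vmf);
			if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
				return ret;
			return finish_mkwrite_fault(vmf, NULL);
		}
		wp_page_reuse(vmf, NULL);
		return 0;
	}
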
3670 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) in wp_page_shared() argument
3671 __releases(vmf->ptl) in wp_page_shared()
3673 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3681 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3682 tmp = vmf_can_call_fault(vmf); in wp_page_shared()
3688 tmp = do_page_mkwrite(vmf, folio); in wp_page_shared()
3694 tmp = finish_mkwrite_fault(vmf, folio); in wp_page_shared()
3701 wp_page_reuse(vmf, folio); in wp_page_shared()
3704 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
3847 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
3848 __releases(vmf->ptl) in do_wp_page()
3850 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in do_wp_page()
3851 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3856 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3858 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3859 return handle_userfault(vmf, VM_UFFD_WP); in do_wp_page()
3867 pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); in do_wp_page()
3869 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
3874 vmf->orig_pte = pte; in do_wp_page()
3881 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3882 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3883 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3886 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3888 if (vmf->page) in do_wp_page()
3889 folio = page_folio(vmf->page); in do_wp_page()
3903 if (!vmf->page || is_fsdax_page(vmf->page)) { in do_wp_page()
3904 vmf->page = NULL; in do_wp_page()
3905 return wp_pfn_shared(vmf); in do_wp_page()
3907 return wp_page_shared(vmf, folio); in do_wp_page()
3918 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
3919 if (!PageAnonExclusive(vmf->page)) in do_wp_page()
3920 SetPageAnonExclusive(vmf->page); in do_wp_page()
3922 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3925 wp_page_reuse(vmf, folio); in do_wp_page()
3934 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3939 return wp_page_copy(vmf); in do_wp_page()
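
Taken together, the do_wp_page() matches outline the whole write-protect fault decision. A condensed sketch of that flow (the uffd-wp async bit clearing, the deferred-TLB-flush check, the unshare early return and the extra folio refcounting around the copy path are elided; the VM_SHARED test is an assumption):

	static vm_fault_t do_wp_page(struct vm_fault *vmf)
		__releases(vmf->ptl)
	{
		const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
		struct vm_area_struct *vma = vmf->vma;
		struct folio *folio = NULL;

		/* uffd write-protect faults are handed to userspace first */
		if (!unshare && userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return handle_userfault(vmf, VM_UFFD_WP);
		}

		vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
		if (vmf->page)
			folio = page_folio(vmf->page);

		/* shared writable mapping: never COW, notify the backing store instead */
		if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {	/* assumed */
			if (!vmf->page || is_fsdax_page(vmf->page)) {
				vmf->page = NULL;
				return wp_pfn_shared(vmf);
			}
			return wp_page_shared(vmf, folio);
		}

		/* private anon page mapped exclusively by us: reuse it in place */
		if (folio && folio_test_anon(folio) &&
		    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
			if (!PageAnonExclusive(vmf->page))
				SetPageAnonExclusive(vmf->page);
			wp_page_reuse(vmf, folio);
			return 0;
		}

		/* otherwise break COW: unlock and copy into a fresh anonymous page */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return wp_page_copy(vmf);
	}
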
4073 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) in remove_device_exclusive_entry() argument
4075 struct folio *folio = page_folio(vmf->page); in remove_device_exclusive_entry()
4076 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry()
4091 ret = folio_lock_or_retry(folio, vmf); in remove_device_exclusive_entry()
4097 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
4098 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); in remove_device_exclusive_entry()
4101 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
4102 &vmf->ptl); in remove_device_exclusive_entry()
4103 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in remove_device_exclusive_entry()
4104 restore_exclusive_pte(vma, folio, vmf->page, vmf->address, in remove_device_exclusive_entry()
4105 vmf->pte, vmf->orig_pte); in remove_device_exclusive_entry()
4107 if (vmf->pte) in remove_device_exclusive_entry()
4108 pte_unmap_unlock(vmf->pte, vmf->ptl); in remove_device_exclusive_entry()
4135 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) in pte_marker_clear() argument
4137 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
4138 vmf->address, &vmf->ptl); in pte_marker_clear()
4139 if (!vmf->pte) in pte_marker_clear()
4149 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) in pte_marker_clear()
4150 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
4151 pte_unmap_unlock(vmf->pte, vmf->ptl); in pte_marker_clear()
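
pte_marker_clear() drops a uffd-wp PTE marker, but only if the PTE is still exactly the marker the fault saw. A sketch from the matches; the return values are assumptions:

	static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
	{
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		if (!vmf->pte)
			return VM_FAULT_NOPAGE;			/* assumed */
		/* only clear if the marker is unchanged; otherwise let the fault retry */
		if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
			pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;					/* assumed */
	}
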
4155 static vm_fault_t do_pte_missing(struct vm_fault *vmf) in do_pte_missing() argument
4157 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
4158 return do_anonymous_page(vmf); in do_pte_missing()
4160 return do_fault(vmf); in do_pte_missing()
4167 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) in pte_marker_handle_uffd_wp() argument
4173 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
4174 return pte_marker_clear(vmf); in pte_marker_handle_uffd_wp()
4176 return do_pte_missing(vmf); in pte_marker_handle_uffd_wp()
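
The two small dispatchers above read, reassembled, roughly as follows (a sketch; only the else keyword in do_pte_missing() is added, the calls are taken from the matched lines):

	static vm_fault_t do_pte_missing(struct vm_fault *vmf)
	{
		if (vma_is_anonymous(vmf->vma))
			return do_anonymous_page(vmf);
		else
			return do_fault(vmf);
	}

	static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
	{
		/* marker left behind after the VMA was unregistered: just drop it */
		if (unlikely(!userfaultfd_wp(vmf->vma)))
			return pte_marker_clear(vmf);
		/* otherwise fault the page in as a normal missing pte;
		 * vmf_orig_pte_uffd_wp() re-applies the wp bit later */
		return do_pte_missing(vmf);
	}
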
4179 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) in handle_pte_marker() argument
4181 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); in handle_pte_marker()
4200 return pte_marker_handle_uffd_wp(vmf); in handle_pte_marker()
4206 static struct folio *__alloc_swap_folio(struct vm_fault *vmf) in __alloc_swap_folio() argument
4208 struct vm_area_struct *vma = vmf->vma; in __alloc_swap_folio()
4212 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); in __alloc_swap_folio()
4216 entry = pte_to_swp_entry(vmf->orig_pte); in __alloc_swap_folio()
4251 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) in can_swapin_thp() argument
4258 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); in can_swapin_thp()
4259 idx = (vmf->address - addr) / PAGE_SIZE; in can_swapin_thp()
4262 if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) in can_swapin_thp()
4304 static struct folio *alloc_swap_folio(struct vm_fault *vmf) in alloc_swap_folio() argument
4306 struct vm_area_struct *vma = vmf->vma; in alloc_swap_folio()
4331 entry = pte_to_swp_entry(vmf->orig_pte); in alloc_swap_folio()
4338 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_swap_folio()
4340 vmf->address, orders); in alloc_swap_folio()
4345 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4346 vmf->address & PMD_MASK, &ptl); in alloc_swap_folio()
4356 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_swap_folio()
4357 if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) in alloc_swap_folio()
4367 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_swap_folio()
4381 return __alloc_swap_folio(vmf); in alloc_swap_folio()
4384 static struct folio *alloc_swap_folio(struct vm_fault *vmf) in alloc_swap_folio() argument
4386 return __alloc_swap_folio(vmf); in alloc_swap_folio()
4400 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
4402 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
4419 if (!pte_unmap_same(vmf)) in do_swap_page()
4422 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
4425 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4426 vmf->address); in do_swap_page()
4428 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
4429 ret = remove_device_exclusive_entry(vmf); in do_swap_page()
4431 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_swap_page()
4441 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
4442 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4443 vmf->address, &vmf->ptl); in do_swap_page()
4444 if (unlikely(!vmf->pte || in do_swap_page()
4445 !pte_same(ptep_get(vmf->pte), in do_swap_page()
4446 vmf->orig_pte))) in do_swap_page()
4453 if (trylock_page(vmf->page)) { in do_swap_page()
4456 get_page(vmf->page); in do_swap_page()
4457 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4458 pgmap = page_pgmap(vmf->page); in do_swap_page()
4459 ret = pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
4460 unlock_page(vmf->page); in do_swap_page()
4461 put_page(vmf->page); in do_swap_page()
4463 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4468 ret = handle_pte_marker(vmf); in do_swap_page()
4470 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
4481 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
4490 folio = alloc_swap_folio(vmf); in do_swap_page()
4533 vmf); in do_swap_page()
4542 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4543 vmf->address, &vmf->ptl); in do_swap_page()
4544 if (likely(vmf->pte && in do_swap_page()
4545 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
4564 ret |= folio_lock_or_retry(folio, vmf); in do_swap_page()
4585 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
4604 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && in do_swap_page()
4614 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4615 &vmf->ptl); in do_swap_page()
4616 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
4627 unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); in do_swap_page()
4628 unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE; in do_swap_page()
4629 pte_t *folio_ptep = vmf->pte - idx; in do_swap_page()
4632 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || in do_swap_page()
4644 address = vmf->address; in do_swap_page()
4645 ptep = vmf->pte; in do_swap_page()
4659 folio_ptep = vmf->pte - idx; in do_swap_page()
4661 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || in do_swap_page()
4690 exclusive = pte_swp_exclusive(vmf->orig_pte); in do_swap_page()
4734 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
4740 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
4742 if (pte_swp_uffd_wp(vmf->orig_pte)) in do_swap_page()
4756 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4758 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
4765 vmf->orig_pte = pte_advance_pfn(pte, page_idx); in do_swap_page()
4806 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4807 ret |= do_wp_page(vmf); in do_swap_page()
4814 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); in do_swap_page()
4816 if (vmf->pte) in do_swap_page()
4817 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4829 if (vmf->pte) in do_swap_page()
4830 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4861 static struct folio *alloc_anon_folio(struct vm_fault *vmf) in alloc_anon_folio() argument
4863 struct vm_area_struct *vma = vmf->vma; in alloc_anon_folio()
4886 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_anon_folio()
4891 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); in alloc_anon_folio()
4902 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_anon_folio()
4916 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_anon_folio()
4933 folio_zero_user(folio, vmf->address); in alloc_anon_folio()
4943 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); in alloc_anon_folio()
4951 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
4953 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
4954 unsigned long addr = vmf->address; in do_anonymous_page()
4968 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4972 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
4974 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
4976 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4977 vmf->address, &vmf->ptl); in do_anonymous_page()
4978 if (!vmf->pte) in do_anonymous_page()
4980 if (vmf_pte_changed(vmf)) { in do_anonymous_page()
4981 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4989 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4990 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
4996 ret = vmf_anon_prepare(vmf); in do_anonymous_page()
5000 folio = alloc_anon_folio(vmf); in do_anonymous_page()
5007 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); in do_anonymous_page()
5021 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
5022 if (!vmf->pte) in do_anonymous_page()
5024 if (nr_pages == 1 && vmf_pte_changed(vmf)) { in do_anonymous_page()
5025 update_mmu_tlb(vma, addr, vmf->pte); in do_anonymous_page()
5027 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { in do_anonymous_page()
5028 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
5038 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
5040 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
5049 if (vmf_orig_pte_uffd_wp(vmf)) in do_anonymous_page()
5051 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
5054 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
5056 if (vmf->pte) in do_anonymous_page()
5057 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
5071 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
5073 struct vm_area_struct *vma = vmf->vma; in __do_fault()
5092 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
5093 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
5094 if (!vmf->prealloc_pte) in __do_fault()
5098 ret = vma->vm_ops->fault(vmf); in __do_fault()
5103 folio = page_folio(vmf->page); in __do_fault()
5104 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
5107 if (page_mapped(vmf->page)) in __do_fault()
5115 vmf->page = NULL; in __do_fault()
5122 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); in __do_fault()
5128 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
5130 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
5132 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
5138 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
5141 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
5144 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
5145 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
5146 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
5179 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
5180 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
5181 if (!vmf->prealloc_pte) in do_set_pmd()
5185 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
5186 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
5202 deposit_prealloc_pte(vmf); in do_set_pmd()
5204 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
5206 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
5212 spin_unlock(vmf->ptl); in do_set_pmd()
5216 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
5230 void set_pte_range(struct vm_fault *vmf, struct folio *folio, in set_pte_range() argument
5233 struct vm_area_struct *vma = vmf->vma; in set_pte_range()
5234 bool write = vmf->flags & FAULT_FLAG_WRITE; in set_pte_range()
5235 bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE); in set_pte_range()
5248 if (unlikely(vmf_orig_pte_uffd_wp(vmf))) in set_pte_range()
5258 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
5261 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
5264 static bool vmf_pte_changed(struct vm_fault *vmf) in vmf_pte_changed() argument
5266 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) in vmf_pte_changed()
5267 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); in vmf_pte_changed()
5269 return !pte_none(ptep_get(vmf->pte)); in vmf_pte_changed()
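
vmf_pte_changed() is fully visible in its matches; as a whole it is:

	static bool vmf_pte_changed(struct vm_fault *vmf)
	{
		if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
			return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);

		return !pte_none(ptep_get(vmf->pte));
	}
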
5287 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
5289 struct vm_area_struct *vma = vmf->vma; in finish_fault()
5293 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && in finish_fault()
5300 addr = vmf->address; in finish_fault()
5304 page = vmf->cow_page; in finish_fault()
5306 page = vmf->page; in finish_fault()
5318 if (pmd_none(*vmf->pmd)) { in finish_fault()
5320 ret = do_set_pmd(vmf, page); in finish_fault()
5325 if (vmf->prealloc_pte) in finish_fault()
5326 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5327 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5345 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in finish_fault()
5347 pgoff_t pte_off = pte_index(vmf->address); in finish_fault()
5360 addr = vmf->address - idx * PAGE_SIZE; in finish_fault()
5365 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5366 addr, &vmf->ptl); in finish_fault()
5367 if (!vmf->pte) in finish_fault()
5371 if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { in finish_fault()
5372 update_mmu_tlb(vma, addr, vmf->pte); in finish_fault()
5375 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { in finish_fault()
5377 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
5382 set_pte_range(vmf, folio, page, nr_pages, addr); in finish_fault()
5388 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
5452 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
5455 pgoff_t pte_off = pte_index(vmf->address); in do_fault_around()
5457 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
5467 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
5469 if (pmd_none(*vmf->pmd)) { in do_fault_around()
5470 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
5471 if (!vmf->prealloc_pte) in do_fault_around()
5476 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
5477 vmf->pgoff + from_pte - pte_off, in do_fault_around()
5478 vmf->pgoff + to_pte - pte_off); in do_fault_around()
5485 static inline bool should_fault_around(struct vm_fault *vmf) in should_fault_around() argument
5488 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
5491 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
5498 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
5508 if (should_fault_around(vmf)) { in do_read_fault()
5509 ret = do_fault_around(vmf); in do_read_fault()
5514 ret = vmf_can_call_fault(vmf); in do_read_fault()
5518 ret = __do_fault(vmf); in do_read_fault()
5522 ret |= finish_fault(vmf); in do_read_fault()
5523 folio = page_folio(vmf->page); in do_read_fault()
5530 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
5532 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
5536 ret = vmf_can_call_fault(vmf); in do_cow_fault()
5538 ret = vmf_anon_prepare(vmf); in do_cow_fault()
5542 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5546 vmf->cow_page = &folio->page; in do_cow_fault()
5548 ret = __do_fault(vmf); in do_cow_fault()
5554 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { in do_cow_fault()
5560 ret |= finish_fault(vmf); in do_cow_fault()
5562 unlock_page(vmf->page); in do_cow_fault()
5563 put_page(vmf->page); in do_cow_fault()
5572 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
5574 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
5578 ret = vmf_can_call_fault(vmf); in do_shared_fault()
5582 ret = __do_fault(vmf); in do_shared_fault()
5586 folio = page_folio(vmf->page); in do_shared_fault()
5594 tmp = do_page_mkwrite(vmf, folio); in do_shared_fault()
5602 ret |= finish_fault(vmf); in do_shared_fault()
5610 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
5622 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
5624 struct vm_area_struct *vma = vmf->vma; in do_fault()
5632 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5633 vmf->address, &vmf->ptl); in do_fault()
5634 if (unlikely(!vmf->pte)) in do_fault()
5644 if (unlikely(pte_none(ptep_get(vmf->pte)))) in do_fault()
5649 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
5651 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
5652 ret = do_read_fault(vmf); in do_fault()
5654 ret = do_cow_fault(vmf); in do_fault()
5656 ret = do_shared_fault(vmf); in do_fault()
5659 if (vmf->prealloc_pte) { in do_fault()
5660 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
5661 vmf->prealloc_pte = NULL; in do_fault()
5666 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, in numa_migrate_check() argument
5670 struct vm_area_struct *vma = vmf->vma; in numa_migrate_check()
5710 return mpol_misplaced(folio, vmf, addr); in numa_migrate_check()
5713 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_single_mapping() argument
5725 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); in numa_rebuild_single_mapping()
5728 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_large_mapping() argument
5733 unsigned long start, end, addr = vmf->address; in numa_rebuild_large_mapping()
5742 start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); in numa_rebuild_large_mapping()
5763 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); in numa_rebuild_large_mapping()
5767 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
5769 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
5783 spin_lock(vmf->ptl); in do_numa_page()
5785 old_pte = ptep_get(vmf->pte); in do_numa_page()
5787 if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { in do_numa_page()
5788 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5800 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
5803 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
5810 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, in do_numa_page()
5819 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5832 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5833 vmf->address, &vmf->ptl); in do_numa_page()
5834 if (unlikely(!vmf->pte)) in do_numa_page()
5836 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in do_numa_page()
5837 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5846 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, in do_numa_page()
5849 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, in do_numa_page()
5851 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5858 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
5860 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd()
5862 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
5864 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
5869 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) in wp_huge_pmd() argument
5871 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd()
5872 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_huge_pmd()
5877 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { in wp_huge_pmd()
5878 if (userfaultfd_wp_async(vmf->vma)) in wp_huge_pmd()
5880 return handle_userfault(vmf, VM_UFFD_WP); in wp_huge_pmd()
5882 return do_huge_pmd_wp_page(vmf); in wp_huge_pmd()
5887 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
5895 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
5900 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
5904 struct vm_area_struct *vma = vmf->vma; in create_huge_pud()
5909 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
5914 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
5918 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud()
5926 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
5933 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
5953 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
5957 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
5964 vmf->pte = NULL; in handle_pte_fault()
5965 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
5982 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
5983 vmf->address, &dummy_pmdval, in handle_pte_fault()
5984 &vmf->ptl); in handle_pte_fault()
5985 if (unlikely(!vmf->pte)) in handle_pte_fault()
5987 vmf->orig_pte = ptep_get_lockless(vmf->pte); in handle_pte_fault()
5988 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
5990 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
5991 pte_unmap(vmf->pte); in handle_pte_fault()
5992 vmf->pte = NULL; in handle_pte_fault()
5996 if (!vmf->pte) in handle_pte_fault()
5997 return do_pte_missing(vmf); in handle_pte_fault()
5999 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
6000 return do_swap_page(vmf); in handle_pte_fault()
6002 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
6003 return do_numa_page(vmf); in handle_pte_fault()
6005 spin_lock(vmf->ptl); in handle_pte_fault()
6006 entry = vmf->orig_pte; in handle_pte_fault()
6007 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { in handle_pte_fault()
6008 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
6011 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { in handle_pte_fault()
6013 return do_wp_page(vmf); in handle_pte_fault()
6014 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) in handle_pte_fault()
6018 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
6019 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
6020 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
6021 vmf->pte, 1); in handle_pte_fault()
6024 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
6032 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
6033 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
6034 vmf->pte); in handle_pte_fault()
6037 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
6050 struct vm_fault vmf = { in __handle_mm_fault() local
6069 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
6070 if (!vmf.pud) in __handle_mm_fault()
6073 if (pud_none(*vmf.pud) && in __handle_mm_fault()
6076 ret = create_huge_pud(&vmf); in __handle_mm_fault()
6080 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
6090 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
6094 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
6100 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
6101 if (!vmf.pmd) in __handle_mm_fault()
6105 if (pud_trans_unstable(vmf.pud)) in __handle_mm_fault()
6108 if (pmd_none(*vmf.pmd) && in __handle_mm_fault()
6111 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
6115 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); in __handle_mm_fault()
6117 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { in __handle_mm_fault()
6119 !is_pmd_migration_entry(vmf.orig_pmd)); in __handle_mm_fault()
6120 if (is_pmd_migration_entry(vmf.orig_pmd)) in __handle_mm_fault()
6121 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
6124 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { in __handle_mm_fault()
6125 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
6126 return do_huge_pmd_numa_page(&vmf); in __handle_mm_fault()
6129 !pmd_write(vmf.orig_pmd)) { in __handle_mm_fault()
6130 ret = wp_huge_pmd(&vmf); in __handle_mm_fault()
6134 huge_pmd_set_accessed(&vmf); in __handle_mm_fault()
6140 return handle_pte_fault(&vmf); in __handle_mm_fault()
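
Finally, the __handle_mm_fault() matches trace the top-level page-table walk that ends in handle_pte_fault(). A condensed sketch of that walk (error handling, the huge-PUD/PMD branches and the remaining vm_fault initialisers are elided or assumed):

	static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
					    unsigned long address, unsigned int flags)
	{
		struct vm_fault vmf = {
			.vma = vma,
			.address = address & PAGE_MASK,
			.flags = flags,
			/* .real_address, .pgoff, .gfp_mask etc. assumed initialised here */
		};
		struct mm_struct *mm = vma->vm_mm;
		pgd_t *pgd;
		p4d_t *p4d;

		pgd = pgd_offset(mm, address);
		p4d = p4d_alloc(mm, pgd, address);
		if (!p4d)
			return VM_FAULT_OOM;

		vmf.pud = pud_alloc(mm, p4d, address);
		if (!vmf.pud)
			return VM_FAULT_OOM;
		/* huge PUD creation / write-protect handling would happen here */

		vmf.pmd = pmd_alloc(mm, vmf.pud, address);
		if (!vmf.pmd)
			return VM_FAULT_OOM;
		/* huge PMD creation, migration-entry wait, NUMA and wp handling here */

		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);

		/* everything else is handled at PTE granularity */
		return handle_pte_fault(&vmf);
	}
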