/linux-6.15/mm/
userfaultfd.c
    1031  struct vm_area_struct *src_vma,                       in move_present_pte()   (argument)
    1119  struct vm_area_struct *src_vma,                       in move_zeropage_pte()  (argument)
    1152  struct vm_area_struct *src_vma,                       in move_pages_pte()     (argument)
    1479  if (!(src_vma->vm_flags & VM_WRITE))                  in validate_move_areas()
    1595  vma_end_read(src_vma);                                in uffd_move_unlock()
    1596  if (src_vma != dst_vma)                               in uffd_move_unlock()
    1620  mmap_assert_locked(src_vma->vm_mm);                   in uffd_move_unlock()
    1737  if (src_vma->vm_flags & VM_SHARED)                    in move_pages()
    1819  dst_pmdval, dst_vma, src_vma,                         in move_pages()
    1840  dst_vma, src_vma,                                     in move_pages()
    [all …]
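The hits at 1479 and 1737 are the source-side gate for UFFDIO_MOVE: the source VMA must be writable and must not be a shared mapping. A minimal standalone model of that gate, with simplified flag values and a stripped-down struct standing in for the kernel's own:

    #include <errno.h>
    #include <stdio.h>

    #define VM_WRITE  0x2UL   /* stand-in flag values for illustration */
    #define VM_SHARED 0x8UL

    struct vm_area_struct { unsigned long vm_flags; };

    /* Models the src_vma checks seen in validate_move_areas()/move_pages():
     * a move source must be writable and must not be VM_SHARED. */
    static int validate_move_src(const struct vm_area_struct *src_vma)
    {
            if (!(src_vma->vm_flags & VM_WRITE))
                    return -EINVAL;
            if (src_vma->vm_flags & VM_SHARED)
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            struct vm_area_struct anon_rw = { .vm_flags = VM_WRITE };
            struct vm_area_struct shared  = { .vm_flags = VM_WRITE | VM_SHARED };

            printf("private rw: %d\n", validate_move_src(&anon_rw)); /* 0 */
            printf("shared:     %d\n", validate_move_src(&shared));  /* -EINVAL */
            return 0;
    }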
|
memory.c
     954  if (src_vma->vm_flags & VM_SHARED)                    in __copy_present_ptes()
     994  if (src_vma->vm_flags & VM_SHARED)                    in copy_present_ptes()
     996  if (!vma_soft_dirty_enabled(src_vma))                 in copy_present_ptes()
    1004  nr, dst_vma, src_vma))) {                             in copy_present_ptes()
    1015  pte = pte_mkwrite(pte, src_vma);                      in copy_present_ptes()
    1145  dst_vma, src_vma,                                     in copy_pte_range()
    1246  addr, dst_vma, src_vma);                              in copy_pmd_range()
    1343  if (src_vma->anon_vma)                                in vma_needs_copy()
    1360  unsigned long end = src_vma->vm_end;                  in copy_page_range()
    1371  if (is_vm_hugetlb_page(src_vma))                      in copy_page_range()
    [all …]
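The hit at 1343 belongs to the fork-time decision in vma_needs_copy(): page tables are copied eagerly only when re-faulting them later would lose state. A sketch of that decision, under the assumption that the three conditions shown here (userfaultfd-wp markers, pfn/mixed maps, an existing anon_vma) are what force the copy; the types and the uffd_wp field are simplified stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_PFNMAP   0x00000400UL  /* stand-in flag values */
    #define VM_MIXEDMAP 0x10000000UL

    struct anon_vma;
    struct vm_area_struct {
            unsigned long vm_flags;
            struct anon_vma *anon_vma;
            bool uffd_wp;             /* stand-in for userfaultfd_wp(vma) */
    };

    /* Copy page tables at fork only when lazy re-faulting would be wrong. */
    static bool vma_needs_copy_model(const struct vm_area_struct *src_vma)
    {
            if (src_vma->uffd_wp)
                    return true;      /* wp markers live in the ptes themselves */
            if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                    return true;      /* no struct pages to re-fault from */
            if (src_vma->anon_vma)
                    return true;      /* anon pages must be COW-shared now */
            return false;             /* file-backed: repopulate on fault */
    }

    int main(void)
    {
            struct vm_area_struct file_ro = { 0 };
            printf("plain file map needs copy: %d\n",
                   vma_needs_copy_model(&file_ro)); /* 0 */
            return 0;
    }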
|
hugetlb.c
    5562  struct vm_area_struct *src_vma)                       in copy_hugetlb_page_range() (argument)
    5567  bool cow = is_cow_mapping(src_vma->vm_flags);         in copy_hugetlb_page_range()
    5568  struct hstate *h = hstate_vma(src_vma);               in copy_hugetlb_page_range()
    5577  src_vma->vm_start,                                    in copy_hugetlb_page_range()
    5578  src_vma->vm_end);                                     in copy_hugetlb_page_range()
    5580  vma_assert_write_locked(src_vma);                     in copy_hugetlb_page_range()
    5589  hugetlb_vma_lock_read(src_vma);                       in copy_hugetlb_page_range()
    5593  for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {  in copy_hugetlb_page_range()
    5595  src_pte = hugetlb_walk(src_vma, addr, sz);            in copy_hugetlb_page_range()
    5645  if (userfaultfd_wp(src_vma) && uffd_wp)               in copy_hugetlb_page_range()
    [all …]
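Line 5593 shows the overall shape of copy_hugetlb_page_range(): walk the source VMA at huge-page stride sz and skip the holes where hugetlb_walk() finds nothing mapped. A toy, self-contained version of that loop shape, with an array standing in for the page table and an assumed 2 MiB huge-page size:

    #include <stdio.h>

    #define HPAGE_SZ (2UL << 20)  /* assumed huge-page size for the model */

    struct vm_area_struct { unsigned long vm_start, vm_end; };

    int main(void)
    {
            /* toy "page tables": one slot per huge page, 0 means a hole */
            unsigned long src[4] = { 0xaa, 0, 0xbb, 0xcc };
            unsigned long dst[4] = { 0 };
            struct vm_area_struct src_vma = { 0, 4 * HPAGE_SZ };
            unsigned long addr, i;

            /* same stride walk as copy_hugetlb_page_range() */
            for (addr = src_vma.vm_start; addr < src_vma.vm_end; addr += HPAGE_SZ) {
                    i = addr / HPAGE_SZ;
                    if (!src[i])
                            continue;        /* like hugetlb_walk() finding no pte */
                    dst[i] = src[i];         /* stand-in for the pte copy + rmap */
            }

            for (i = 0; i < 4; i++)
                    printf("dst[%lu] = %#lx\n", i, dst[i]);
            return 0;
    }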
|
huge_memory.c
    1691  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  in copy_huge_pmd() (argument)
    1714  VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));  in copy_huge_pmd()
    1783  if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {  in copy_huge_pmd()
    1789  __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);  in copy_huge_pmd()
    2553  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  in move_pages_huge_pmd() (argument)
    2569  vma_assert_locked(src_vma);                           in move_pages_huge_pmd()
    2603  flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);  in move_pages_huge_pmd()
    2645  src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);  in move_pages_huge_pmd()
    2660  src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);  in move_pages_huge_pmd()
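Lines 1783 and 1789 are the interesting failure path in copy_huge_pmd(): if the anon rmap cannot be duplicated at PMD granularity (typically because the folio may be DMA-pinned), the huge PMD is split and the caller retries at PTE level. A toy model of that control flow; the pinned flag and both helpers are stand-ins, not the kernel functions:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for folio_try_dup_anon_rmap_pmd(): fails when pinned */
    static bool try_dup_anon_rmap_pmd(bool folio_pinned)
    {
            return !folio_pinned;    /* pinned folios must not become COW */
    }

    static void split_huge_pmd_model(void)
    {
            puts("split the huge PMD back into PTEs");
    }

    /* Mirrors the shape of copy_huge_pmd(): on rmap-dup failure, split
     * the huge mapping and tell the caller to retry the PTE-level path. */
    static int copy_huge_pmd_model(bool folio_pinned)
    {
            if (try_dup_anon_rmap_pmd(folio_pinned))
                    return 0;        /* whole PMD shared for COW, done */
            split_huge_pmd_model();
            return -EAGAIN;          /* caller falls back to copy_pte_range() */
    }

    int main(void)
    {
            printf("unpinned folio: %d\n", copy_huge_pmd_model(false)); /* 0 */
            printf("pinned folio:   %d\n", copy_huge_pmd_model(true));  /* -EAGAIN */
            return 0;
    }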
|
/linux-6.15/include/linux/
rmap.h
     621  struct vm_area_struct *src_vma, enum rmap_level level)  in __folio_try_dup_anon_rmap() (argument)
     638  unlikely(folio_needs_cow_for_dma(src_vma, folio));    in __folio_try_dup_anon_rmap()
     708  struct vm_area_struct *src_vma)                       in folio_try_dup_anon_rmap_ptes() (argument)
     711  src_vma, RMAP_LEVEL_PTE);                             in folio_try_dup_anon_rmap_ptes()
     716  struct vm_area_struct *src_vma)                       in folio_try_dup_anon_rmap_pte() (argument)
     718  return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,  in folio_try_dup_anon_rmap_pte()
     747  struct vm_area_struct *src_vma)                       in folio_try_dup_anon_rmap_pmd() (argument)
     751  src_vma, RMAP_LEVEL_PMD);                             in folio_try_dup_anon_rmap_pmd()
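The check at line 638 is the pinned-page guard: __folio_try_dup_anon_rmap() refuses to share an anon folio with the child when it may be DMA-pinned, forcing an immediate copy instead. A simplified model of that decision; the two booleans stand in for the mm's pinned flag and folio_maybe_dma_pinned():

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct mm_model    { bool has_pinned; };       /* stand-in: mm has pins */
    struct folio_model { bool maybe_dma_pinned; }; /* stand-in: folio pin check */

    /* 0: rmap duplicated, child shares the folio COW-style.
     * -EBUSY: caller must copy the page now. */
    static int try_dup_anon_rmap_model(const struct mm_model *src_mm,
                                       const struct folio_model *folio)
    {
            if (src_mm->has_pinned && folio->maybe_dma_pinned)
                    return -EBUSY;   /* pinned memory must stay exclusive */
            /* ...the real code bumps the folio mapcount/refcount here... */
            return 0;
    }

    int main(void)
    {
            struct mm_model mm = { .has_pinned = true };
            struct folio_model pinned = { .maybe_dma_pinned = true };

            printf("pinned folio: %d\n", try_dup_anon_rmap_model(&mm, &pinned));
            return 0;
    }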
|
userfaultfd_k.h
     145  struct vm_area_struct *src_vma,
|
pgtable.h
    1519  struct vm_area_struct *src_vma, unsigned long *pfn)   in track_pfn_copy() (argument)
    1562  struct vm_area_struct *src_vma, unsigned long *pfn);
|
huge_mm.h
      13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
|
hugetlb.h
     329  struct vm_area_struct *src_vma)                       in copy_hugetlb_page_range() (argument)
|
mm.h
    2492  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
|
/linux-6.15/arch/x86/mm/pat/
memtype.c
     988  struct vm_area_struct *src_vma, unsigned long *pfn)   in track_pfn_copy() (argument)
     990  const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;  in track_pfn_copy()
     995  if (!(src_vma->vm_flags & VM_PAT))                    in track_pfn_copy()
    1002  if (get_pat_info(src_vma, &paddr, &pgprot))           in track_pfn_copy()
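Line 995 is the cheap early-out: only VMAs flagged VM_PAT carry a PAT memtype reservation that fork must duplicate. A sketch of that shape; the flag value is a placeholder and the body only comments where get_pat_info() and the real reservation work would go:

    #include <stdio.h>

    #define VM_PAT 0x40000000UL  /* stand-in flag value for illustration */

    struct vm_area_struct { unsigned long vm_start, vm_end, vm_flags; };

    /* Models track_pfn_copy(): do nothing unless the source VMA has a
     * PAT-tracked range whose memtype must be re-reserved for the child. */
    static int track_pfn_copy_model(const struct vm_area_struct *src_vma,
                                    unsigned long *pfn)
    {
            unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;

            if (!(src_vma->vm_flags & VM_PAT))
                    return 0;        /* no reservation, nothing to copy */

            /* real code: get_pat_info() finds paddr/pgprot, then the
             * [paddr, paddr + vma_size) range is re-reserved for the child */
            *pfn = 0;                /* placeholder for the looked-up pfn */
            (void)vma_size;
            return 0;
    }

    int main(void)
    {
            struct vm_area_struct plain = { 0, 4096, 0 };
            unsigned long pfn;

            printf("non-PAT vma: %d\n", track_pfn_copy_model(&plain, &pfn));
            return 0;
    }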
|