/linux-6.15/mm/
page_vma_mapped.c
     23  pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,    in map_pte()
     24  pvmw->address, &pvmw->ptl);    in map_pte()
     37  pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,    in map_pte()
    138  if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))    in check_pte()
    148  if (pfn > pvmw->pfn + pvmw->nr_pages - 1)    in check_pmd()
    155  pvmw->address = (pvmw->address + size) & ~(size - 1);    in step_forward()
    196  if (pvmw->pmd && !pvmw->pte)    in page_vma_mapped_walk()
    210  pvmw->pte = hugetlb_walk(vma, pvmw->address, size);    in page_vma_mapped_walk()
    214  pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);    in page_vma_mapped_walk()
    241  pvmw->pmd = pmd_offset(pud, pvmw->address);    in page_vma_mapped_walk()
    [all …]
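The matches above are the walker's own internals. For orientation, a minimal
caller-side sketch of the API (hedged: example_rmap_one() is a made-up
callback name, not kernel code; field usage follows the struct as seen in
these results):

    /*
     * Minimal sketch of driving page_vma_mapped_walk(). On each
     * successful return, pvmw.ptl is held and exactly one of
     * pvmw.pte (PTE-mapped) or pvmw.pmd with a NULL pvmw.pte
     * (PMD-mapped THP) identifies the mapping at pvmw.address.
     */
    static bool example_rmap_one(struct folio *folio,
                                 struct vm_area_struct *vma,
                                 unsigned long address, void *arg)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            pte_t ptent = ptep_get(pvmw.pte);

                            if (pte_present(ptent)) {
                                    /* ... inspect or modify the PTE ... */
                            }
                    } else {
                            /* PMD-level mapping: operate on pvmw.pmd. */
                    }
            }
            return true;    /* continue the enclosing rmap walk */
    }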
rmap.c
    858  address = pvmw.address;    in folio_referenced_one()
    901  } else if (pvmw.pte) {    in folio_referenced_one()
    903  pvmw.pte))    in folio_referenced_one()
    907  pvmw.pmd))    in folio_referenced_one()
   1043  if (pvmw->pte) {    in page_vma_mkclean_one()
   1891  pvmw.flags = PVMW_SYNC;    in try_to_unmap_one()
   1929  if (!pvmw.pte) {    in try_to_unmap_one()
   1931  if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))    in try_to_unmap_one()
   1970  address = pvmw.address;    in try_to_unmap_one()
   2292  pvmw.flags = PVMW_SYNC;    in try_to_migrate_one()
    [all …]
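folio_referenced_one() is the archetypal consumer: it tests and clears the
accessed bit at each mapping the walk finds. A hedged sketch of that pattern
(count_referenced() is a made-up name; mlock handling and the MGLRU
look-around are omitted):

    /*
     * Sketch of the folio_referenced_one() pattern: test and clear
     * the accessed bit at every mapping of the folio in this VMA.
     * Simplified; the real function also bails out on mlock'd VMAs.
     */
    static int count_referenced(struct folio *folio,
                                struct vm_area_struct *vma,
                                unsigned long address)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
            int referenced = 0;

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            if (ptep_clear_flush_young_notify(vma, pvmw.address,
                                                              pvmw.pte))
                                    referenced++;
                    } else {
                            /* PMD-mapped THP, as at line 907 above. */
                            if (pmdp_clear_flush_young_notify(vma, pvmw.address,
                                                              pvmw.pmd))
                                    referenced++;
                    }
            }
            return referenced;
    }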
page_idle.c
     56  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);    in page_idle_clear_pte_refs_one()
     59  while (page_vma_mapped_walk(&pvmw)) {    in page_idle_clear_pte_refs_one()
     60  addr = pvmw.address;    in page_idle_clear_pte_refs_one()
     61  if (pvmw.pte) {    in page_idle_clear_pte_refs_one()
     70  if (likely(pte_present(ptep_get(pvmw.pte))))    in page_idle_clear_pte_refs_one()
     71  referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);    in page_idle_clear_pte_refs_one()
     74  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))    in page_idle_clear_pte_refs_one()
migrate.c
    224  pvmw->vma->vm_page_prot));    in try_to_map_unused_to_zeropage()
    225  set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);    in try_to_map_unused_to_zeropage()
    245  while (page_vma_mapped_walk(&pvmw)) {    in remove_migration_pte()
    255  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;    in remove_migration_pte()
    260  if (!pvmw.pte) {    in remove_migration_pte()
    263  remove_migration_pmd(&pvmw, new);    in remove_migration_pte()
    273  old_pte = ptep_get(pvmw.pte);    in remove_migration_pte()
    319  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,    in remove_migration_pte()
    326  pvmw.address, rmap_flags);    in remove_migration_pte()
    329  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);    in remove_migration_pte()
    [all …]
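remove_migration_pte() is the inverse of unmapping: each migration swap entry
the walk finds is rewritten as a real PTE for the new folio. A hedged, heavily
simplified sketch of that step (restore_one_pte() is a made-up name; hugetlb,
device-private entries, and rmap accounting are omitted):

    /*
     * Sketch of the remove_migration_pte() pattern: decode the
     * migration entry at pvmw->pte and install a present PTE for
     * the new folio in its place.
     */
    static void restore_one_pte(struct folio *new, struct vm_area_struct *vma,
                                struct page_vma_mapped_walk *pvmw)
    {
            pte_t pte = mk_pte(folio_page(new, 0),
                               READ_ONCE(vma->vm_page_prot));
            swp_entry_t entry = pte_to_swp_entry(ptep_get(pvmw->pte));

            /* Restore write permission if it was held before migration. */
            if (is_writable_migration_entry(entry))
                    pte = pte_mkwrite(pte, vma);

            set_pte_at(vma->vm_mm, pvmw->address, pvmw->pte, pte);
            update_mmu_cache(vma, pvmw->address, pvmw->pte);
    }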
ksm.c
   1250  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);    in write_protect_page()
   1261  if (pvmw.address == -EFAULT)    in write_protect_page()
   1265  pvmw.address + PAGE_SIZE);    in write_protect_page()
   1268  if (!page_vma_mapped_walk(&pvmw))    in write_protect_page()
   1270  if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))    in write_protect_page()
   1273  entry = ptep_get(pvmw.pte);    in write_protect_page()
   1300  entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);    in write_protect_page()
   1306  set_pte_at(mm, pvmw.address, pvmw.pte, entry);    in write_protect_page()
   1313  set_pte_at(mm, pvmw.address, pvmw.pte, entry);    in write_protect_page()
   1324  set_pte_at(mm, pvmw.address, pvmw.pte, entry);    in write_protect_page()
   [all …]
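write_protect_page() shows the walk used for a single address: KSM downgrades
one mapping to read-only under the PTE lock. A hedged sketch of that pattern
(make_readonly() is a made-up name; the real function also checks for pending
TLB flushes and GUP pins before committing):

    /*
     * Sketch of the write_protect_page() pattern: flush the old
     * mapping, then reinstall it clean and write-protected, all
     * under pvmw.ptl so no writable entry can race in.
     */
    static bool make_readonly(struct folio *folio, struct vm_area_struct *vma,
                              unsigned long addr)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
            struct mm_struct *mm = vma->vm_mm;
            pte_t entry;

            if (!page_vma_mapped_walk(&pvmw))
                    return false;
            if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
                    goto out;

            entry = ptep_get(pvmw.pte);
            if (pte_write(entry)) {
                    entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
                    if (pte_dirty(entry))
                            folio_mark_dirty(folio);
                    entry = pte_mkclean(pte_wrprotect(entry));
                    set_pte_at(mm, pvmw.address, pvmw.pte, entry);
            }
    out:
            page_vma_mapped_walk_done(&pvmw);
            return true;
    }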
internal.h
   1063  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)    in vma_address_end() argument
   1065  struct vm_area_struct *vma = pvmw->vma;    in vma_address_end()
   1070  if (pvmw->nr_pages == 1)    in vma_address_end()
   1071  return pvmw->address + PAGE_SIZE;    in vma_address_end()
   1073  pgoff = pvmw->pgoff + pvmw->nr_pages;    in vma_address_end()
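Only fragments of the helper are shown: the single-page fast path, then the
start of the multi-page case. The remainder, reconstructed here as a hedged
sketch consistent with the visible lines, converts the folio's end pgoff back
to a user address and clamps it to the VMA:

    pgoff = pvmw->pgoff + pvmw->nr_pages;
    address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    /* The folio may extend beyond this VMA: clamp to vm_end. */
    if (address < vma->vm_start || address > vma->vm_end)
            address = vma->vm_end;
    return address;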
huge_memory.c
   4622  struct vm_area_struct *vma = pvmw->vma;    in set_pmd_migration_entry()
   4624  unsigned long address = pvmw->address;    in set_pmd_migration_entry()
   4630  if (!(pvmw->pmd && !pvmw->pte))    in set_pmd_migration_entry()
   4660  set_pmd_at(mm, address, pvmw->pmd, pmdswp);    in set_pmd_migration_entry()
   4671  struct vm_area_struct *vma = pvmw->vma;    in remove_migration_pmd()
   4673  unsigned long address = pvmw->address;    in remove_migration_pmd()
   4678  if (!(pvmw->pmd && !pvmw->pte))    in remove_migration_pmd()
   4681  entry = pmd_to_swp_entry(*pvmw->pmd);    in remove_migration_pmd()
   4684  if (pmd_swp_soft_dirty(*pvmw->pmd))    in remove_migration_pmd()
   4688  if (pmd_swp_uffd_wp(*pvmw->pmd))    in remove_migration_pmd()
   [all …]
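Both functions first check `pvmw->pmd && !pvmw->pte`, i.e. that the walk
stopped at a PMD-mapped THP. A hedged sketch of the install side
(install_pmd_migration_entry() is a made-up name; refcount freezing, dirty
propagation, and rmap updates are omitted):

    /*
     * Sketch of the set_pmd_migration_entry() flow: invalidate the
     * huge PMD, encode the pfn as a migration swap entry, carry
     * over soft-dirty/uffd-wp bits, and install it (line 4660).
     */
    static void install_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                                            struct page *page)
    {
            struct vm_area_struct *vma = pvmw->vma;
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address = pvmw->address;
            pmd_t pmdval, pmdswp;
            swp_entry_t entry;

            pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
            if (pmd_write(pmdval))
                    entry = make_writable_migration_entry(page_to_pfn(page));
            else
                    entry = make_readable_migration_entry(page_to_pfn(page));
            pmdswp = swp_entry_to_pmd(entry);
            if (pmd_soft_dirty(pmdval))
                    pmdswp = pmd_swp_mksoft_dirty(pmdswp);
            if (pmd_uffd_wp(pmdval))
                    pmdswp = pmd_swp_mkuffd_wp(pmdswp);
            set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    }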
vmscan.c
   4185  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)    in lru_gen_look_around() argument
   4194  pte_t *pte = pvmw->pte;    in lru_gen_look_around()
   4195  unsigned long addr = pvmw->address;    in lru_gen_look_around()
   4196  struct vm_area_struct *vma = pvmw->vma;    in lru_gen_look_around()
   4197  struct folio *folio = pfn_folio(pvmw->pfn);    in lru_gen_look_around()
   4205  lockdep_assert_held(pvmw->ptl);    in lru_gen_look_around()
   4211  if (spin_is_contended(pvmw->ptl))    in lru_gen_look_around()
   4276  update_bloom_filter(mm_state, max_seq, pvmw->pmd);    in lru_gen_look_around()
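A hedged sketch of the idea behind lru_gen_look_around(): the PTE lock is
already held when the walk stops (line 4205 asserts this), so MGLRU scans
neighbouring PTEs for accessed bits in the same pass instead of walking once
per folio. Here look_around(), start, and end are hypothetical; the real
function derives its window from pvmw->address and the PMD boundary, and
backs off if the lock is contended:

    static void look_around(struct page_vma_mapped_walk *pvmw,
                            unsigned long start, unsigned long end)
    {
            unsigned long addr;
            /* Step back from the PTE the walk stopped at to the window start. */
            pte_t *pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;

            lockdep_assert_held(pvmw->ptl);

            for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
                    pte_t ptent = ptep_get(pte);

                    if (!pte_present(ptent) || !pte_young(ptent))
                            continue;
                    /* ... map the pfn to a folio and bump its generation ... */
            }
    }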
/linux-6.15/mm/damon/
paddr.c
     25  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);    in damon_folio_mkold_one()
     27  while (page_vma_mapped_walk(&pvmw)) {    in damon_folio_mkold_one()
     28  addr = pvmw.address;    in damon_folio_mkold_one()
     29  if (pvmw.pte)    in damon_folio_mkold_one()
     30  damon_ptep_mkold(pvmw.pte, vma, addr);    in damon_folio_mkold_one()
     32  damon_pmdp_mkold(pvmw.pmd, vma, addr);    in damon_folio_mkold_one()
     98  while (page_vma_mapped_walk(&pvmw)) {    in damon_folio_young_one()
     99  addr = pvmw.address;    in damon_folio_young_one()
    100  if (pvmw.pte) {    in damon_folio_young_one()
    101  pte = ptep_get(pvmw.pte);    in damon_folio_young_one()
    [all …]
/linux-6.15/include/linux/
rmap.h
    934  if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))    in page_vma_mapped_walk_done()
    935  pte_unmap(pvmw->pte);    in page_vma_mapped_walk_done()
    936  if (pvmw->ptl)    in page_vma_mapped_walk_done()
    937  spin_unlock(pvmw->ptl);    in page_vma_mapped_walk_done()
    952  WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);    in page_vma_mapped_walk_restart()
    954  if (likely(pvmw->ptl))    in page_vma_mapped_walk_restart()
    955  spin_unlock(pvmw->ptl);    in page_vma_mapped_walk_restart()
    959  pvmw->ptl = NULL;    in page_vma_mapped_walk_restart()
    960  pvmw->pmd = NULL;    in page_vma_mapped_walk_restart()
    961  pvmw->pte = NULL;    in page_vma_mapped_walk_restart()
    [all …]
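The done/restart helpers above imply a usage rule: a caller that breaks out of
the loop before page_vma_mapped_walk() returns false must release the walk
state itself, exactly as the header implements it. A minimal sketch
(should_stop() is a hypothetical predicate):

    while (page_vma_mapped_walk(&pvmw)) {
            if (should_stop(&pvmw)) {       /* hypothetical predicate */
                    /* Drops pvmw.ptl and unmaps pvmw.pte, per lines 934-937. */
                    page_vma_mapped_walk_done(&pvmw);
                    break;
            }
            /* ... otherwise handle the mapping and continue ... */
    }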
swapops.h
    534  extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    537  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
    567  static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,    in set_pmd_migration_entry() argument
    573  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,    in remove_migration_pmd() argument
mmzone.h
    585  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
    604  static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)    in lru_gen_look_around() argument
/linux-6.15/kernel/events/
uprobes.c
    173  DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);    in __replace_page()
    193  if (!page_vma_mapped_walk(&pvmw))    in __replace_page()
    195  VM_BUG_ON_PAGE(addr != pvmw.address, old_page);    in __replace_page()
    196  pte = ptep_get(pvmw.pte);    in __replace_page()
    203  page_vma_mapped_walk_done(&pvmw);    in __replace_page()
    221  ptep_clear_flush(vma, addr, pvmw.pte);    in __replace_page()
    223  set_pte_at(mm, addr, pvmw.pte,    in __replace_page()
    229  page_vma_mapped_walk_done(&pvmw);    in __replace_page()
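These matches trace the PTE swap at the heart of __replace_page(), which
uprobes uses to substitute a page carrying a breakpoint instruction. A hedged
sketch of that critical section (simplified: the real code also transfers
rmap and LRU state between the folios and handles an unmap-only case):

    /* Under pvmw.ptl: retire the old mapping, install the new page. */
    flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
    ptep_clear_flush(vma, addr, pvmw.pte);
    set_pte_at(mm, addr, pvmw.pte,
               mk_pte(new_page, vma->vm_page_prot));   /* new_page: replacement */
    page_vma_mapped_walk_done(&pvmw);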