Lines matching refs:folio (cross-reference listing of the Linux reverse-mapping header, include/linux/rmap.h)

175 struct anon_vma *folio_get_anon_vma(const struct folio *folio);
178 static __always_inline void folio_lock_large_mapcount(struct folio *folio) in folio_lock_large_mapcount() argument
180 bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids); in folio_lock_large_mapcount()
183 static __always_inline void folio_unlock_large_mapcount(struct folio *folio) in folio_unlock_large_mapcount() argument
185 __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids); in folio_unlock_large_mapcount()
188 static inline unsigned int folio_mm_id(const struct folio *folio, int idx) in folio_mm_id() argument
191 return folio->_mm_id[idx] & MM_ID_MASK; in folio_mm_id()
194 static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id) in folio_set_mm_id() argument
197 folio->_mm_id[idx] &= ~MM_ID_MASK; in folio_set_mm_id()
198 folio->_mm_id[idx] |= id; in folio_set_mm_id()
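The two _mm_id slots and the "maybe mapped shared" bit live in folio->_mm_ids and are serialized by the bit spinlock taken in folio_lock_large_mapcount(). Below is a minimal sketch of how a caller could inspect the slots under that lock; the helper name is hypothetical and only illustrates the locking rule, it is not an existing kernel function:

    /* Hypothetical, illustrative helper: does @folio track @mm_id in a slot? */
    static bool my_folio_tracks_mm_id(struct folio *folio, mm_id_t mm_id)
    {
            bool tracked;

            folio_lock_large_mapcount(folio);       /* bit_spin_lock on _mm_ids */
            tracked = folio_mm_id(folio, 0) == mm_id ||
                      folio_mm_id(folio, 1) == mm_id;
            folio_unlock_large_mapcount(folio);

            return tracked;
    }
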
201 static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio, in __folio_large_mapcount_sanity_checks() argument
204 VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio)); in __folio_large_mapcount_sanity_checks()
214 VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio)); in __folio_large_mapcount_sanity_checks()
215 VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX); in __folio_large_mapcount_sanity_checks()
217 VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY && in __folio_large_mapcount_sanity_checks()
218 folio->_mm_id_mapcount[0] != -1); in __folio_large_mapcount_sanity_checks()
219 VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY && in __folio_large_mapcount_sanity_checks()
220 folio->_mm_id_mapcount[0] < 0); in __folio_large_mapcount_sanity_checks()
221 VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY && in __folio_large_mapcount_sanity_checks()
222 folio->_mm_id_mapcount[1] != -1); in __folio_large_mapcount_sanity_checks()
223 VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY && in __folio_large_mapcount_sanity_checks()
224 folio->_mm_id_mapcount[1] < 0); in __folio_large_mapcount_sanity_checks()
225 VM_WARN_ON_ONCE(!folio_mapped(folio) && in __folio_large_mapcount_sanity_checks()
226 folio_test_large_maybe_mapped_shared(folio)); in __folio_large_mapcount_sanity_checks()
229 static __always_inline void folio_set_large_mapcount(struct folio *folio, in folio_set_large_mapcount() argument
232 __folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id); in folio_set_large_mapcount()
234 VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY); in folio_set_large_mapcount()
235 VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY); in folio_set_large_mapcount()
238 atomic_set(&folio->_large_mapcount, mapcount - 1); in folio_set_large_mapcount()
239 folio->_mm_id_mapcount[0] = mapcount - 1; in folio_set_large_mapcount()
240 folio_set_mm_id(folio, 0, vma->vm_mm->mm_id); in folio_set_large_mapcount()
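folio_set_large_mapcount() may only be used while both slots still hold MM_ID_DUMMY, i.e. for the very first mappings of a freshly allocated large folio, which necessarily belong to a single MM. A hedged sketch of such a call site follows; the wrapper is illustrative (the real callers sit in mm/rmap.c) and it assumes the new folio is being PTE-mapped in full into one VMA:

    /* Illustrative: the new folio gets all of its pages PTE-mapped into @vma. */
    static void my_account_first_mapping(struct folio *folio,
                    struct vm_area_struct *vma)
    {
            /* Slot 0 takes vma->vm_mm->mm_id; _large_mapcount stores nr - 1. */
            folio_set_large_mapcount(folio, folio_nr_pages(folio), vma);
    }
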
243 static __always_inline int folio_add_return_large_mapcount(struct folio *folio, in folio_add_return_large_mapcount() argument
249 folio_lock_large_mapcount(folio); in folio_add_return_large_mapcount()
250 __folio_large_mapcount_sanity_checks(folio, diff, mm_id); in folio_add_return_large_mapcount()
252 new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff; in folio_add_return_large_mapcount()
253 atomic_set(&folio->_large_mapcount, new_mapcount_val); in folio_add_return_large_mapcount()
262 if (folio_mm_id(folio, 0) == mm_id) { in folio_add_return_large_mapcount()
263 folio->_mm_id_mapcount[0] += diff; in folio_add_return_large_mapcount()
264 if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) { in folio_add_return_large_mapcount()
265 folio->_mm_id_mapcount[0] = -1; in folio_add_return_large_mapcount()
266 folio_set_mm_id(folio, 0, MM_ID_DUMMY); in folio_add_return_large_mapcount()
267 folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; in folio_add_return_large_mapcount()
269 } else if (folio_mm_id(folio, 1) == mm_id) { in folio_add_return_large_mapcount()
270 folio->_mm_id_mapcount[1] += diff; in folio_add_return_large_mapcount()
271 if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) { in folio_add_return_large_mapcount()
272 folio->_mm_id_mapcount[1] = -1; in folio_add_return_large_mapcount()
273 folio_set_mm_id(folio, 1, MM_ID_DUMMY); in folio_add_return_large_mapcount()
274 folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; in folio_add_return_large_mapcount()
276 } else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) { in folio_add_return_large_mapcount()
277 folio_set_mm_id(folio, 0, mm_id); in folio_add_return_large_mapcount()
278 folio->_mm_id_mapcount[0] = diff - 1; in folio_add_return_large_mapcount()
281 folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; in folio_add_return_large_mapcount()
282 } else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) { in folio_add_return_large_mapcount()
283 folio_set_mm_id(folio, 1, mm_id); in folio_add_return_large_mapcount()
284 folio->_mm_id_mapcount[1] = diff - 1; in folio_add_return_large_mapcount()
286 folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; in folio_add_return_large_mapcount()
288 folio_unlock_large_mapcount(folio); in folio_add_return_large_mapcount()
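folio_add_return_large_mapcount() funnels every mapcount increment through the two slots: if the MM already owns a slot its per-MM count grows, otherwise a free (MM_ID_DUMMY) slot is claimed, and only when neither is possible is FOLIO_MM_IDS_SHARED_BIT set. A hedged usage sketch; the wrapper is illustrative and assumes, based on the -1 bias used when storing _large_mapcount above, that the return value is the folio's new total mapcount:

    /* Illustrative: account @nr new PTE mappings of @folio for @vma's MM. */
    static int my_account_added_mappings(struct folio *folio, int nr,
                    struct vm_area_struct *vma)
    {
            /* Locks, updates the total and the per-MM count, then unlocks. */
            return folio_add_return_large_mapcount(folio, nr, vma);
    }
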
293 static __always_inline int folio_sub_return_large_mapcount(struct folio *folio, in folio_sub_return_large_mapcount() argument
299 folio_lock_large_mapcount(folio); in folio_sub_return_large_mapcount()
300 __folio_large_mapcount_sanity_checks(folio, diff, mm_id); in folio_sub_return_large_mapcount()
302 new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff; in folio_sub_return_large_mapcount()
303 atomic_set(&folio->_large_mapcount, new_mapcount_val); in folio_sub_return_large_mapcount()
311 if (folio_mm_id(folio, 0) == mm_id) { in folio_sub_return_large_mapcount()
312 folio->_mm_id_mapcount[0] -= diff; in folio_sub_return_large_mapcount()
313 if (folio->_mm_id_mapcount[0] >= 0) in folio_sub_return_large_mapcount()
315 folio->_mm_id_mapcount[0] = -1; in folio_sub_return_large_mapcount()
316 folio_set_mm_id(folio, 0, MM_ID_DUMMY); in folio_sub_return_large_mapcount()
317 } else if (folio_mm_id(folio, 1) == mm_id) { in folio_sub_return_large_mapcount()
318 folio->_mm_id_mapcount[1] -= diff; in folio_sub_return_large_mapcount()
319 if (folio->_mm_id_mapcount[1] >= 0) in folio_sub_return_large_mapcount()
321 folio->_mm_id_mapcount[1] = -1; in folio_sub_return_large_mapcount()
322 folio_set_mm_id(folio, 1, MM_ID_DUMMY); in folio_sub_return_large_mapcount()
331 if (folio->_mm_id_mapcount[0] == new_mapcount_val || in folio_sub_return_large_mapcount()
332 folio->_mm_id_mapcount[1] == new_mapcount_val) in folio_sub_return_large_mapcount()
333 folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT; in folio_sub_return_large_mapcount()
335 folio_unlock_large_mapcount(folio); in folio_sub_return_large_mapcount()
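The subtraction side mirrors this: the owning slot's per-MM count is dropped, the slot is freed again once that MM no longer maps the folio, and the shared bit is cleared when one slot's count matches the remaining total, i.e. when a single MM holds every remaining mapping. A hedged sketch, under the same assumption that the return value is the new total mapcount:

    /* Illustrative: drop @nr PTE mappings and report whether any remain. */
    static bool my_account_removed_mappings(struct folio *folio, int nr,
                    struct vm_area_struct *vma)
    {
            return folio_sub_return_large_mapcount(folio, nr, vma) > 0;
    }
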
344 static inline void folio_set_large_mapcount(struct folio *folio, int mapcount, in folio_set_large_mapcount() argument
348 atomic_set(&folio->_large_mapcount, mapcount - 1); in folio_set_large_mapcount()
351 static inline void folio_add_large_mapcount(struct folio *folio, in folio_add_large_mapcount() argument
354 atomic_add(diff, &folio->_large_mapcount); in folio_add_large_mapcount()
357 static inline int folio_add_return_large_mapcount(struct folio *folio, in folio_add_return_large_mapcount() argument
363 static inline void folio_sub_large_mapcount(struct folio *folio, in folio_sub_large_mapcount() argument
366 atomic_sub(diff, &folio->_large_mapcount); in folio_sub_large_mapcount()
369 static inline int folio_sub_return_large_mapcount(struct folio *folio, in folio_sub_return_large_mapcount() argument
376 #define folio_inc_large_mapcount(folio, vma) \ argument
377 folio_add_large_mapcount(folio, 1, vma)
378 #define folio_inc_return_large_mapcount(folio, vma) \ argument
379 folio_add_return_large_mapcount(folio, 1, vma)
380 #define folio_dec_large_mapcount(folio, vma) \ argument
381 folio_sub_large_mapcount(folio, 1, vma)
382 #define folio_dec_return_large_mapcount(folio, vma) \ argument
383 folio_sub_return_large_mapcount(folio, 1, vma)
407 static inline void __folio_rmap_sanity_checks(const struct folio *folio, in __folio_rmap_sanity_checks() argument
411 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); in __folio_rmap_sanity_checks()
414 VM_WARN_ON_FOLIO(is_zero_folio(folio), folio); in __folio_rmap_sanity_checks()
426 VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); in __folio_rmap_sanity_checks()
427 VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); in __folio_rmap_sanity_checks()
438 VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); in __folio_rmap_sanity_checks()
439 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); in __folio_rmap_sanity_checks()
446 VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio); in __folio_rmap_sanity_checks()
447 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio); in __folio_rmap_sanity_checks()
457 void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
458 void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
460 #define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \ argument
461 folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
462 void folio_add_anon_rmap_pmd(struct folio *, struct page *,
464 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
466 void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
468 #define folio_add_file_rmap_pte(folio, page, vma) \ argument
469 folio_add_file_rmap_ptes(folio, page, 1, vma)
470 void folio_add_file_rmap_pmd(struct folio *, struct page *,
472 void folio_add_file_rmap_pud(struct folio *, struct page *,
474 void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
476 #define folio_remove_rmap_pte(folio, page, vma) \ argument
477 folio_remove_rmap_ptes(folio, page, 1, vma)
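The PTE-level declarations and macros above are the entry points that fault, CoW and zap paths use to keep the reverse map in sync with the page tables. A hedged sketch of the usual pattern; the wrapper functions and their context are illustrative only, and the caller is assumed to hold the PTE lock as the real paths in mm/memory.c do:

    /* Illustrative: install one anonymous PTE mapping and account it. */
    static void my_map_anon_pte(struct folio *folio, struct page *page,
                    struct vm_area_struct *vma, unsigned long addr, bool new_folio)
    {
            if (new_folio)          /* freshly allocated, exclusive to this MM */
                    folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
            else                    /* e.g. mapping an existing folio after swap-in */
                    folio_add_anon_rmap_pte(folio, page, vma, addr, RMAP_NONE);
    }

    /* Illustrative: tear one PTE mapping down again. */
    static void my_unmap_pte(struct folio *folio, struct page *page,
                    struct vm_area_struct *vma)
    {
            folio_remove_rmap_pte(folio, page, vma);
    }
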
478 void folio_remove_rmap_pmd(struct folio *, struct page *,
480 void folio_remove_rmap_pud(struct folio *, struct page *,
483 void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
485 void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
489 static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, in hugetlb_try_dup_anon_rmap() argument
492 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_try_dup_anon_rmap()
493 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in hugetlb_try_dup_anon_rmap()
495 if (PageAnonExclusive(&folio->page)) { in hugetlb_try_dup_anon_rmap()
496 if (unlikely(folio_needs_cow_for_dma(vma, folio))) in hugetlb_try_dup_anon_rmap()
498 ClearPageAnonExclusive(&folio->page); in hugetlb_try_dup_anon_rmap()
500 atomic_inc(&folio->_entire_mapcount); in hugetlb_try_dup_anon_rmap()
501 atomic_inc(&folio->_large_mapcount); in hugetlb_try_dup_anon_rmap()
506 static inline int hugetlb_try_share_anon_rmap(struct folio *folio) in hugetlb_try_share_anon_rmap() argument
508 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_try_share_anon_rmap()
509 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in hugetlb_try_share_anon_rmap()
510 VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio); in hugetlb_try_share_anon_rmap()
516 if (unlikely(folio_maybe_dma_pinned(folio))) in hugetlb_try_share_anon_rmap()
518 ClearPageAnonExclusive(&folio->page); in hugetlb_try_share_anon_rmap()
529 static inline void hugetlb_add_file_rmap(struct folio *folio) in hugetlb_add_file_rmap() argument
531 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_add_file_rmap()
532 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); in hugetlb_add_file_rmap()
534 atomic_inc(&folio->_entire_mapcount); in hugetlb_add_file_rmap()
535 atomic_inc(&folio->_large_mapcount); in hugetlb_add_file_rmap()
538 static inline void hugetlb_remove_rmap(struct folio *folio) in hugetlb_remove_rmap() argument
540 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_remove_rmap()
542 atomic_dec(&folio->_entire_mapcount); in hugetlb_remove_rmap()
543 atomic_dec(&folio->_large_mapcount); in hugetlb_remove_rmap()
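hugetlb folios are excluded from the per-page and per-MM tracking above; the hugetlb_* helpers only move the entire mapcount (plus the large mapcount) and, on the anon side, may refuse to share a possibly DMA-pinned folio. A hedged usage sketch; the wrappers are illustrative and the callers are assumed to hold the relevant page table lock as the real hugetlb code does:

    /* Illustrative: fork-time duplication of an anon hugetlb mapping. */
    static int my_hugetlb_dup_anon(struct folio *folio, struct vm_area_struct *src_vma)
    {
            /* Fails (non-zero) when the folio must be copied for DMA pinning. */
            return hugetlb_try_dup_anon_rmap(folio, src_vma);
    }

    /* Illustrative: map/unmap a hugetlb file folio as one entire mapping. */
    static void my_hugetlb_map_file(struct folio *folio)
    {
            hugetlb_add_file_rmap(folio);
    }

    static void my_hugetlb_unmap(struct folio *folio)
    {
            hugetlb_remove_rmap(folio);
    }
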
546 static __always_inline void __folio_dup_file_rmap(struct folio *folio, in __folio_dup_file_rmap() argument
552 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_dup_file_rmap()
556 if (!folio_test_large(folio)) { in __folio_dup_file_rmap()
557 atomic_inc(&folio->_mapcount); in __folio_dup_file_rmap()
566 folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); in __folio_dup_file_rmap()
570 atomic_inc(&folio->_entire_mapcount); in __folio_dup_file_rmap()
571 folio_inc_large_mapcount(folio, dst_vma); in __folio_dup_file_rmap()
587 static inline void folio_dup_file_rmap_ptes(struct folio *folio, in folio_dup_file_rmap_ptes() argument
590 __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE); in folio_dup_file_rmap_ptes()
593 static __always_inline void folio_dup_file_rmap_pte(struct folio *folio, in folio_dup_file_rmap_pte() argument
596 __folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE); in folio_dup_file_rmap_pte()
609 static inline void folio_dup_file_rmap_pmd(struct folio *folio, in folio_dup_file_rmap_pmd() argument
613 __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PMD); in folio_dup_file_rmap_pmd()
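__folio_dup_file_rmap() and its wrappers are the fork-time helpers for file-backed mappings: duplication can never fail, so they only bump the relevant mapcounts for the destination VMA's MM. A hedged sketch; the wrapper is illustrative of how the child's mapping would be accounted while copying page tables:

    /* Illustrative: account one copied file-backed PTE in the child. */
    static void my_fork_copy_file_pte(struct folio *folio, struct page *page,
                    struct vm_area_struct *dst_vma)
    {
            /* dst_vma belongs to the child MM; this cannot fail. */
            folio_dup_file_rmap_pte(folio, page, dst_vma);
    }
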
619 static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, in __folio_try_dup_anon_rmap() argument
627 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in __folio_try_dup_anon_rmap()
628 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_try_dup_anon_rmap()
637 maybe_pinned = likely(!folio_is_device_private(folio)) && in __folio_try_dup_anon_rmap()
638 unlikely(folio_needs_cow_for_dma(src_vma, folio)); in __folio_try_dup_anon_rmap()
653 if (!folio_test_large(folio)) { in __folio_try_dup_anon_rmap()
656 atomic_inc(&folio->_mapcount); in __folio_try_dup_anon_rmap()
666 folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); in __folio_try_dup_anon_rmap()
675 atomic_inc(&folio->_entire_mapcount); in __folio_try_dup_anon_rmap()
676 folio_inc_large_mapcount(folio, dst_vma); in __folio_try_dup_anon_rmap()
706 static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, in folio_try_dup_anon_rmap_ptes() argument
710 return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma, in folio_try_dup_anon_rmap_ptes()
714 static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, in folio_try_dup_anon_rmap_pte() argument
718 return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma, in folio_try_dup_anon_rmap_pte()
745 static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, in folio_try_dup_anon_rmap_pmd() argument
750 return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma, in folio_try_dup_anon_rmap_pmd()
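Duplicating an anonymous mapping at fork can fail: if the folio may be DMA-pinned, CoW-sharing it would be unsafe and the caller has to copy the page instead; on success the PageAnonExclusive marker is cleared so both MMs treat the folio as shared. A hedged sketch of that calling pattern; the wrapper and its error handling are illustrative, the actual page copy lives in the fork path:

    /* Illustrative: try to share one anon PTE with the child at fork time. */
    static int my_fork_copy_anon_pte(struct folio *folio, struct page *page,
                    struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
    {
            if (folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))
                    return -EAGAIN;         /* caller must copy the page instead */
            return 0;
    }
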
758 static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, in __folio_try_share_anon_rmap() argument
761 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in __folio_try_share_anon_rmap()
762 VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio); in __folio_try_share_anon_rmap()
763 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_try_share_anon_rmap()
766 if (unlikely(folio_is_device_private(folio))) { in __folio_try_share_anon_rmap()
817 if (unlikely(folio_maybe_dma_pinned(folio))) in __folio_try_share_anon_rmap()
851 static inline int folio_try_share_anon_rmap_pte(struct folio *folio, in folio_try_share_anon_rmap_pte() argument
854 return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE); in folio_try_share_anon_rmap_pte()
880 static inline int folio_try_share_anon_rmap_pmd(struct folio *folio, in folio_try_share_anon_rmap_pmd() argument
884 return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR, in folio_try_share_anon_rmap_pmd()
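__folio_try_share_anon_rmap() covers the opposite transition: an exclusive anonymous PTE is about to be replaced by a swap or migration entry, and the exclusive marker may only be dropped if the folio cannot be concurrently DMA-pinned. A hedged sketch of the PTE-level check; the helper is illustrative, the real users are the unmap/migrate walks in mm/rmap.c:

    /* Illustrative: may we convert this exclusive anon PTE to a swap entry? */
    static bool my_can_drop_exclusive(struct folio *folio, struct page *page)
    {
            /* Non-zero means "maybe pinned": keep the PTE exclusive instead. */
            return folio_try_share_anon_rmap_pte(folio, page) == 0;
    }
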
895 int folio_referenced(struct folio *, int is_locked,
898 void try_to_migrate(struct folio *folio, enum ttu_flags flags);
899 void try_to_unmap(struct folio *, enum ttu_flags flags);
902 void *owner, struct folio **foliop);
965 unsigned long page_address_in_vma(const struct folio *folio,
974 int folio_mkclean(struct folio *);
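folio_referenced(), try_to_unmap(), try_to_migrate() and folio_mkclean() are the classic consumers of the reverse map: they walk every mapping of a folio to sample and clear access bits, to replace PTEs with swap or migration entries, or to write-protect the folio for writeback. A hedged, reclaim-flavoured sketch; the policy is illustrative (the real decisions live in mm/vmscan.c) and the caller is assumed to hold the folio lock:

    /* Illustrative: unmap a locked anon folio unless it was recently used. */
    static bool my_try_unmap_cold_folio(struct folio *folio)
    {
            unsigned long vm_flags;

            /* How many VMAs referenced it recently? (memcg filter unused here) */
            if (folio_referenced(folio, 1, NULL, &vm_flags) > 0)
                    return false;

            try_to_unmap(folio, TTU_BATCH_FLUSH);
            return !folio_mapped(folio);
    }
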
987 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
1008 bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
1010 int (*done)(struct folio *folio);
1011 struct anon_vma *(*anon_lock)(const struct folio *folio,
1016 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
1017 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
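rmap_walk() is the generic engine behind those users: it invokes the rmap_one callback for every VMA that maps the folio, honouring the optional done/anon_lock hooks of rmap_walk_control. A hedged sketch of a trivial walker that merely counts mapping VMAs; the callback and wrapper are illustrative, only the structure layout and the rmap_walk() call follow the declarations above, and the folio is assumed to be locked by the caller:

    /* Illustrative rmap_one callback: count each mapping VMA, keep walking. */
    static bool my_count_one_vma(struct folio *folio, struct vm_area_struct *vma,
                    unsigned long addr, void *arg)
    {
            (*(int *)arg)++;
            return true;
    }

    static int my_count_mapping_vmas(struct folio *folio)
    {
            int count = 0;
            struct rmap_walk_control rwc = {
                    .rmap_one = my_count_one_vma,
                    .arg = &count,
            };

            rmap_walk(folio, &rwc);
            return count;
    }
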
1018 struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
1026 static inline int folio_referenced(struct folio *folio, int is_locked, in folio_referenced() argument
1034 static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags) in try_to_unmap() argument
1038 static inline int folio_mkclean(struct folio *folio) in folio_mkclean() argument