/linux-6.15/include/linux/

rmap.h
    411  VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);  in __folio_rmap_sanity_checks()
    414  VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);  in __folio_rmap_sanity_checks()
    426  VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);  in __folio_rmap_sanity_checks()
    492  VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);  in hugetlb_try_dup_anon_rmap()
    493  VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);  in hugetlb_try_dup_anon_rmap()
    508  VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);  in hugetlb_try_share_anon_rmap()
    509  VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);  in hugetlb_try_share_anon_rmap()
    532  VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);  in hugetlb_add_file_rmap()
    627  VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);  in __folio_try_dup_anon_rmap()
    761  VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);  in __folio_try_share_anon_rmap()
    [all …]

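The rmap.h hits are all debug assertions of the same shape. A minimal sketch of that pattern, with a hypothetical helper name (this is not the body of __folio_rmap_sanity_checks()):

/*
 * Illustrative only: the rmap interfaces reject hugetlb folios and the
 * shared zero folio, and insist that the page passed alongside a folio
 * really belongs to that folio.
 */
static inline void my_folio_rmap_sanity_checks(struct folio *folio,
						struct page *page)
{
	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);
	VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
}
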
pagemap.h
    915   return folio->index + folio_nr_pages(folio);  in folio_next_index()
    1033  struct folio *folio = page_folio(page);  in page_offset() local
    1056  struct folio *folio;  member
    1062  struct folio *folio;  member
    1070  if (wait_page->folio != key->folio)  in wake_page_match()
    1080  void __folio_lock(struct folio *folio);
    1084  void folio_unlock(struct folio *folio);
    1153  struct folio *folio;  in lock_page() local
    1227  void folio_end_writeback(struct folio *folio);
    1228  void folio_wait_stable(struct folio *folio);
    [all …]

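A short sketch of how the locking and index helpers declared in pagemap.h are commonly combined; the function name and the printout are illustrative, and the caller is assumed to already hold a reference on the folio:

/* Assumes <linux/pagemap.h>; illustrative only. */
static void my_inspect_folio(struct folio *folio)
{
	pgoff_t first = folio->index;			/* first page index covered */
	pgoff_t next  = folio_next_index(folio);	/* folio->index + folio_nr_pages(folio) */

	folio_lock(folio);				/* may sleep until the lock is held */
	pr_info("folio covers indices [%lu, %lu)\n",
		(unsigned long)first, (unsigned long)next);
	folio_unlock(folio);
}
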
hugetlb_cgroup.h
    63   VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);  in __hugetlb_cgroup_from_folio()
    76   hugetlb_cgroup_from_folio_rsvd(struct folio *folio)  in hugetlb_cgroup_from_folio_rsvd() argument
    84   VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);  in __set_hugetlb_cgroup()
    133  struct folio *folio);
    136  struct folio *folio);
    138  struct folio *folio);
    140  struct folio *folio);
    222  struct folio *folio)  in hugetlb_cgroup_commit_charge() argument
    229  struct folio *folio)  in hugetlb_cgroup_commit_charge_rsvd() argument
    234  struct folio *folio)  in hugetlb_cgroup_uncharge_folio() argument
    [all …]

mm_inline.h
    28   static inline int folio_is_file_lru(struct folio *folio)  in folio_is_file_lru() argument
    68   VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);  in __folio_clear_lru_flags()
    73   if (folio_test_active(folio) && folio_test_unevictable(folio))  in __folio_clear_lru_flags()
    91   VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);  in folio_lru_list()
    144  static inline int folio_lru_refs(struct folio *folio)  in folio_lru_refs() argument
    157  static inline int folio_lru_gen(struct folio *folio)  in folio_lru_gen() argument
    245  (folio_test_dirty(folio) || folio_test_writeback(folio))))  in lru_gen_folio_seq()
    291  VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);  in lru_gen_del_folio()
    292  VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);  in lru_gen_del_folio()
    340  void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)  in lruvec_add_folio() argument
    [all …]

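The mm_inline.h hits revolve around picking an LRU list from folio flags. A simplified sketch of that selection logic (the in-tree folio_lru_list() additionally asserts that active and unevictable are never set together, as the line-91 hit shows):

/* Illustrative simplification, not the in-tree helper. */
static enum lru_list my_pick_lru_list(struct folio *folio)
{
	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	if (folio_is_file_lru(folio))	/* file-backed, i.e. not swap-backed */
		return folio_test_active(folio) ? LRU_ACTIVE_FILE : LRU_INACTIVE_FILE;

	return folio_test_active(folio) ? LRU_ACTIVE_ANON : LRU_INACTIVE_ANON;
}
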
migrate.h
    10   typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
    11   typedef void free_folio_t(struct folio *folio, unsigned long private);
    73   bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
    76   struct folio *dst, struct folio *src);
    79   void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
    81   struct folio *newfolio, struct folio *folio, int extra_count);
    90   static inline struct folio *alloc_migration_target(struct folio *src,  in alloc_migration_target()
    99   struct folio *dst, struct folio *src)  in migrate_huge_page_move_mapping()
    121  static inline bool folio_test_movable(struct folio *folio)  in folio_test_movable() argument
    145  int migrate_misplaced_folio_prepare(struct folio *folio,
    [all …]

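Line 10 above shows the new_folio_t callback type that the migration core uses to obtain destination folios. A sketch of such a callback, with an illustrative name and GFP mask (in-tree callers typically go through alloc_migration_target() instead):

/* Hypothetical new_folio_t implementation. */
static struct folio *my_migration_alloc(struct folio *src, unsigned long private)
{
	/* Allocate a destination folio of the same order as the source. */
	return folio_alloc(GFP_HIGHUSER_MOVABLE, folio_order(src));
}
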
page_ref.h
    87   static inline int folio_ref_count(const struct folio *folio)  in folio_ref_count() argument
    104  static inline void folio_set_count(struct folio *folio, int v)  in folio_set_count() argument
    125  static inline void folio_ref_add(struct folio *folio, int nr)  in folio_ref_add() argument
    137  static inline void folio_ref_sub(struct folio *folio, int nr)  in folio_ref_sub() argument
    142  static inline int folio_ref_sub_return(struct folio *folio, int nr)  in folio_ref_sub_return() argument
    158  static inline void folio_ref_inc(struct folio *folio)  in folio_ref_inc() argument
    170  static inline void folio_ref_dec(struct folio *folio)  in folio_ref_dec() argument
    198  static inline int folio_ref_inc_return(struct folio *folio)  in folio_ref_inc_return() argument
    212  static inline int folio_ref_dec_and_test(struct folio *folio)  in folio_ref_dec_and_test() argument
    226  static inline int folio_ref_dec_return(struct folio *folio)  in folio_ref_dec_return() argument
    [all …]

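These are the raw folio reference counters. A brief, hedged sketch of the usual take/inspect/drop pattern built on them; the surrounding logic and the function name are hypothetical:

static bool my_grab_folio(struct folio *folio)
{
	if (!folio_try_get(folio))	/* fails if the refcount already hit zero */
		return false;

	pr_debug("folio refcount is now %d\n", folio_ref_count(folio));

	folio_put(folio);		/* drop the reference taken above */
	return true;
}
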
/linux-6.15/mm/

swap.c
    97   void __folio_put(struct folio *folio)  in __folio_put() argument
    123  VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);  in lru_add()
    165  struct folio *folio = fbatch->folios[i];  in folio_batch_move_lru() local
    179  struct folio *folio, move_fn_t move_fn,  in __folio_batch_add_and_move() argument
    324  void folio_activate(struct folio *folio)  in folio_activate() argument
    337  void folio_activate(struct folio *folio)  in folio_activate() argument
    494  void folio_add_lru(struct folio *folio)  in folio_add_lru() argument
    497  folio_test_unevictable(folio), folio);  in folio_add_lru()
    700  void folio_deactivate(struct folio *folio)  in folio_deactivate() argument
    948  struct folio *folio = folios->folios[i];  in folios_put_refs() local
    [all …]

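folio_add_lru(), hit at line 494, is the entry point callers use to hand a new folio to the LRU batching code. A sketch of the typical pattern, with the allocation and mapping steps as placeholders (not code from mm/swap.c):

static struct folio *my_alloc_and_add_lru(gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, 0);

	if (!folio)
		return NULL;

	/* ... insert the folio into a mapping / page tables here ... */

	folio_add_lru(folio);	/* queue it on the per-CPU LRU batch */
	return folio;
}
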
page_io.c
    32   struct folio *folio = bio_first_folio_all(bio);  in __end_swap_bio_write() local
    60   struct folio *folio = bio_first_folio_all(bio);  in __end_swap_bio_read() local
    174  static bool is_folio_zero_filled(struct folio *folio)  in is_folio_zero_filled() argument
    204  static void swap_zeromap_folio_set(struct folio *folio)  in swap_zeromap_folio_set() argument
    224  static void swap_zeromap_folio_clear(struct folio *folio)  in swap_zeromap_folio_clear() argument
    242  struct folio *folio = page_folio(page);  in swap_writepage() local
    444  bio_add_folio_nofail(bio, folio, folio_size(folio), 0);  in swap_writepage_bdev_async()
    457  VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);  in __swap_writepage()
    539  folio_zero_range(folio, 0, folio_size(folio));  in swap_read_folio_zeromap()
    624  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);  in swap_read_folio()
    [all …]

filemap.c
    150   struct folio *folio)  in filemap_unaccount_folio() argument
    281   struct folio *folio;  in page_cache_delete_batch() local
    489   struct folio *folio;  in filemap_range_has_page() local
    649   struct folio *folio;  in filemap_range_has_writeback() local
    995   struct folio *folio;  in filemap_alloc_folio_noprof() local
    1184  key.folio = folio;  in folio_wake_bit()
    1681  wait->folio = folio;  in __folio_lock_async()
    1854  struct folio *folio;  in filemap_get_entry() local
    1901  struct folio *folio;  in __filemap_get_folio() local
    2021  struct folio *folio;  in find_get_entry() local
    [all …]

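__filemap_get_folio() and friends are the page-cache lookup path. A sketch of a basic lookup through the public filemap_get_folio() wrapper; it assumes the behaviour in recent kernels where a cache miss returns an ERR_PTR rather than NULL, and the function name is illustrative:

static void my_peek_at_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);

	if (IS_ERR(folio))
		return;				/* not in the page cache */

	pr_debug("found folio of %zu bytes at index %lu\n",
		 folio_size(folio), folio->index);
	folio_put(folio);			/* drop the lookup reference */
}
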
truncate.c
    30   struct folio *folio;  in clear_shadow_entries() local
    65   struct folio *folio;  in truncate_folio_batch_exceptionals() local
    160  folio_invalidate(folio, 0, folio_size(folio));  in truncate_cleanup_folio()
    209  truncate_inode_folio(folio->mapping, folio);  in truncate_inode_partial_folio()
    267  truncate_inode_folio(folio->mapping, folio);  in truncate_inode_partial_folio()
    275  struct folio *folio)  in generic_error_remove_folio() argument
    350  struct folio *folio;  in truncate_inode_pages_range() local
    426  struct folio *folio = fbatch.folios[i];  in truncate_inode_pages_range() local
    525  struct folio *folio = fbatch.folios[i];  in mapping_try_invalidate() local
    668  struct folio *folio = fbatch.folios[i];  in invalidate_inode_pages2_range() local
    [all …]

rmap.c
    1113  int folio_mkclean(struct folio *folio)  in folio_mkclean() argument
    1463  atomic_read(&folio->_mapcount) > 0, folio);  in __folio_add_anon_rmap()
    1850  struct folio *folio, pte_t *ptep)  in can_batch_unmap_folio_ptes() argument
    2332  !folio_test_pmd_mappable(folio), folio);  in try_to_migrate_one()
    2659  struct folio *folio, *fw_folio;  in make_device_exclusive() local
    2803  static void rmap_walk_anon(struct folio *folio,  in rmap_walk_anon() argument
    2871  VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);  in __rmap_walk_file()
    2872  VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);  in __rmap_walk_file()
    2873  VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);  in __rmap_walk_file()
    2930  __rmap_walk_file(folio, folio->mapping, folio->index,  in rmap_walk_file()
    [all …]

mlock.c
    95   folio->mlock_count = !!folio_test_mlocked(folio);  in __mlock_folio()
    105  VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);  in __mlock_new_folio()
    169  static inline struct folio *mlock_lru(struct folio *folio)  in mlock_lru() argument
    174  static inline struct folio *mlock_new(struct folio *folio)  in mlock_new() argument
    190  struct folio *folio;  in mlock_folio_batch() local
    196  folio = (struct folio *)((unsigned long)folio - mlock);  in mlock_folio_batch()
    242  void mlock_folio(struct folio *folio)  in mlock_folio() argument
    267  void mlock_new_folio(struct folio *folio)  in mlock_new_folio() argument
    290  void munlock_folio(struct folio *folio)  in munlock_folio() argument
    360  struct folio *folio;  in mlock_pte_range() local
    [all …]

swap_state.c
    100  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);  in add_to_swap_cache()
    175  void delete_from_swap_cache(struct folio *folio)  in delete_from_swap_cache() argument
    185  folio_ref_sub(folio, folio_nr_pages(folio));  in delete_from_swap_cache()
    225  void free_swap_cache(struct folio *folio)  in free_swap_cache() argument
    240  struct folio *folio = page_folio(page);  in free_page_and_swap_cache() local
    289  struct folio *folio;  in swap_cache_get_folio() local
    372  struct folio *folio;  in __read_swap_cache_async() local
    491  struct folio *folio;  in read_swap_cache_async() local
    591  struct folio *folio;  in swap_cluster_readahead() local
    736  struct folio *folio;  in swap_vma_readahead() local
    [all …]

page_idle.c
    37   struct folio *folio;  in page_idle_get_folio() local
    43   if (!folio_test_lru(folio) || !folio_try_get(folio))  in page_idle_get_folio()
    47   folio = NULL;  in page_idle_get_folio()
    49   return folio;  in page_idle_get_folio()
    52   static bool page_idle_clear_pte_refs_one(struct folio *folio,  in page_idle_clear_pte_refs_one() argument
    94   static void page_idle_clear_pte_refs(struct folio *folio)  in page_idle_clear_pte_refs() argument
    106  if (!folio_mapped(folio) || !folio_raw_mapping(folio))  in page_idle_clear_pte_refs()
    124  struct folio *folio;  in page_idle_bitmap_read() local
    144  if (folio) {  in page_idle_bitmap_read()
    169  struct folio *folio;  in page_idle_bitmap_write() local
    [all …]

migrate.c
    135   struct folio *folio;  in putback_movable_pages() local
    193   struct folio *folio,  in try_to_map_unused_to_zeropage() argument
    232   struct folio *folio;  member
    449   struct folio *folio)  in folio_expected_refs() argument
    471   struct folio *newfolio, struct folio *folio, int expected_count)  in __folio_migrate_mapping() argument
    602   struct folio *newfolio, struct folio *folio, int extra_count)  in folio_migrate_mapping() argument
    653   void folio_migrate_flags(struct folio *newfolio, struct folio *folio)  in folio_migrate_flags() argument
    1611  struct folio *folio, *folio2;  in migrate_hugetlbs() local
    2060  struct folio *folio, *folio2;  in migrate_pages() local
    2261  struct folio *folio;  in add_folio_for_migration() local
    [all …]

/linux-6.15/fs/btrfs/

subpage.c
    182  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_assert() argument
    283  struct folio *folio, u64 start, u32 len)  in btrfs_folio_end_lock() argument
    313  struct folio *folio, unsigned long bitmap)  in btrfs_folio_end_lock_bitmap() argument
    369  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_set_uptodate() argument
    384  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_clear_uptodate() argument
    422  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_clear_and_test_dirty() argument
    449  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_set_writeback() argument
    495  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_clear_ordered() argument
    525  struct folio *folio, u64 start, u32 len)  in btrfs_subpage_clear_checked() argument
    729  struct folio *folio, u64 start, u32 len)  in btrfs_folio_set_lock() argument
    [all …]

subpage.h
    13   struct folio;
    88   struct folio *folio)  in btrfs_is_subpage() argument
    90   if (folio->mapping && folio->mapping->host)  in btrfs_is_subpage()
    100  struct folio *folio)  in btrfs_is_subpage() argument
    102  if (folio->mapping && folio->mapping->host)  in btrfs_is_subpage()
    122  struct folio *folio, u64 start, u32 len);
    124  struct folio *folio, u64 start, u32 len);
    150  struct folio *folio, u64 start, u32 len); \
    152  struct folio *folio, u64 start, u32 len); \
    166  struct folio *folio, u64 start, u32 len); \
    [all …]

/linux-6.15/mm/damon/

paddr.c
    22   static bool damon_folio_mkold_one(struct folio *folio,  in damon_folio_mkold_one() argument
    37   static void damon_folio_mkold(struct folio *folio)  in damon_folio_mkold() argument
    65   if (!folio)  in damon_pa_mkold()
    130  static bool damon_folio_young(struct folio *folio)  in damon_folio_young() argument
    210  struct folio *folio)  in damos_pa_filter_match() argument
    287  struct folio *folio;  in damon_pa_pageout() local
    342  struct folio *folio;  in damon_pa_mark_accessed_or_deactivate() local
    421  struct folio *folio;  in damon_pa_migrate_folio_list() local
    426  struct folio *folio;  in damon_pa_migrate_folio_list() local
    510  struct folio *folio;  in damon_pa_migrate() local
    [all …]

/linux-6.15/fs/jfs/

jfs_metapage.c
    135  static inline void inc_io(struct folio *folio)  in inc_io() argument
    176  #define dec_io(folio, status, handler) handler(folio, status)  argument
    277  struct folio *folio = bio->bi_private;  in metapage_read_end_io() local
    333  struct folio *folio = bio->bi_private;  in metapage_write_end_io() local
    341  static int metapage_write_folio(struct folio *folio,  in metapage_write_folio() argument
    583  struct folio *folio;  in __get_metapage() local
    655  mp->folio = folio;  in __get_metapage()
    695  static int metapage_write_one(struct folio *folio)  in metapage_write_one() argument
    725  struct folio *folio = mp->folio;  in force_metapage() local
    759  struct folio *folio = mp->folio;  in release_metapage() local
    [all …]

/linux-6.15/fs/bcachefs/

fs-io-pagecache.h
    19  static inline u64 folio_end_pos(struct folio *folio)  in folio_end_pos() argument
    21  return folio_pos(folio) + folio_size(folio);  in folio_end_pos()
    24  static inline size_t folio_sectors(struct folio *folio)  in folio_sectors() argument
    29  static inline loff_t folio_sector(struct folio *folio)  in folio_sector() argument
    34  static inline u64 folio_end_sector(struct folio *folio)  in folio_end_sector() argument
    72  static inline void bch2_folio_sector_set(struct folio *folio,  in bch2_folio_sector_set() argument
    80  static inline int folio_pos_to_s(struct folio *folio, loff_t pos)  in folio_pos_to_s() argument
    84  BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));  in folio_pos_to_s()
    89  static inline void __bch2_folio_release(struct folio *folio)  in __bch2_folio_release() argument
    94  static inline void bch2_folio_release(struct folio *folio)  in bch2_folio_release() argument
    [all …]

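These bcachefs helpers are thin arithmetic over the generic folio accessors; line 21 shows the byte-range relation directly. A sketch of the same relationships, with illustrative names:

static inline u64 my_folio_end_pos(struct folio *folio)
{
	/* byte offset just past the folio: its start position plus its size */
	return folio_pos(folio) + folio_size(folio);
}

static inline size_t my_folio_sectors(struct folio *folio)
{
	/* number of 512-byte sectors the folio covers (assumed definition) */
	return folio_size(folio) >> 9;
}
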
/linux-6.15/fs/iomap/

buffered-io.c
    336   struct folio *folio)  in iomap_read_inline_data() argument
    683   size_t len, struct folio *folio)  in __iomap_write_begin() argument
    756   struct folio *folio)  in __iomap_put_folio() argument
    769   struct folio *folio)  in iomap_write_begin_inline() argument
    782   struct folio *folio;  in iomap_write_begin() local
    917   struct folio *folio;  in iomap_write_iter() local
    1120  struct folio *folio;  in iomap_write_delalloc_scan() local
    1277  struct folio *folio;  in iomap_unshare_iter() local
    1352  struct folio *folio;  in iomap_zero_iter() local
    1473  struct folio *folio)  in iomap_folio_mkwrite_iter() argument
    [all …]

/linux-6.15/fs/nilfs2/

page.c
    53   struct folio *folio;  in nilfs_grab_buffer() local
    76   struct folio *folio = bh->b_folio;  in nilfs_forget_buffer() local
    140  bool nilfs_folio_buffers_clean(struct folio *folio)  in nilfs_folio_buffers_clean() argument
    153  void nilfs_folio_bug(struct folio *folio)  in nilfs_folio_bug() argument
    169  folio, folio_ref_count(folio),  in nilfs_folio_bug()
    258  struct folio *folio = fbatch.folios[i], *dfolio;  in nilfs_copy_dirty_pages() local
    312  struct folio *folio = fbatch.folios[i], *dfolio;  in nilfs_copy_back_pages() local
    372  struct folio *folio = fbatch.folios[i];  in nilfs_clear_dirty_pages() local
    400  void nilfs_clear_folio_dirty(struct folio *folio)  in nilfs_clear_folio_dirty() argument
    474  void __nilfs_clear_folio_dirty(struct folio *folio)  in __nilfs_clear_folio_dirty() argument
    [all …]

/linux-6.15/fs/ubifs/

file.c
    99    static int do_readpage(struct folio *folio)  in do_readpage() argument
    119   folio_zero_range(folio, 0, folio_size(folio));  in do_readpage()
    213   struct folio *folio;  in write_begin_slow() local
    417   struct folio *folio;  in ubifs_write_begin() local
    519   struct folio *folio, void *fsdata)  in ubifs_write_end() argument
    762   struct folio *folio;  in ubifs_do_bulk_read() local
    990   inode->i_ino, folio->index, folio->flags);  in ubifs_writepage()
    1132  struct folio *folio;  in do_truncation() local
    1442  struct folio *folio)  in ubifs_dirty_folio() argument
    1489  struct folio *folio = page_folio(vmf->page);  in ubifs_vm_page_mkwrite() local
    [all …]

/linux-6.15/include/trace/events/

pagemap.h
    30  TP_PROTO(struct folio *folio),
    32  TP_ARGS(folio),
    35  __field(struct folio *, folio )
    42  __entry->folio = folio;
    43  __entry->pfn = folio_pfn(folio);
    50  __entry->folio,
    63  TP_PROTO(struct folio *folio),
    65  TP_ARGS(folio),
    68  __field(struct folio *, folio )
    73  __entry->folio = folio;
    [all …]

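The fragments above are pieces of TRACE_EVENT definitions that take a folio. A sketch of how those pieces assemble into one event; the event name and printk format are illustrative, not the in-tree definitions:

TRACE_EVENT(my_folio_event,

	TP_PROTO(struct folio *folio),

	TP_ARGS(folio),

	TP_STRUCT__entry(
		__field(struct folio *, folio)
		__field(unsigned long, pfn)
	),

	TP_fast_assign(
		__entry->folio = folio;
		__entry->pfn = folio_pfn(folio);
	),

	TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
);
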
/linux-6.15/fs/ecryptfs/

mmap.c
    33   struct folio *folio = NULL;  in ecryptfs_writepages() local
    36   while ((folio = writeback_iter(mapping, wbc, folio, &error))) {  in ecryptfs_writepages()
    41   folio->index);  in ecryptfs_writepages()
    45   folio_unlock(folio);  in ecryptfs_writepages()
    91   ecryptfs_copy_up_encrypted_with_header(struct folio *folio,  in ecryptfs_copy_up_encrypted_with_header() argument
    171  err = ecryptfs_read_lower_page_segment(folio, folio->index, 0,  in ecryptfs_read_folio()
    188  folio->index, 0, folio_size(folio),  in ecryptfs_read_folio()
    206  folio->index);  in ecryptfs_read_folio()
    248  struct folio *folio;  in ecryptfs_write_begin() local
    256  *foliop = folio;  in ecryptfs_write_begin()
    [all …]

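Lines 33-45 of mmap.c show the writeback_iter()-based ->writepages() loop. A sketch of that shape, with the per-folio write helper as a placeholder for the filesystem's own logic (ecryptfs encrypts the folio there):

/* Hypothetical per-folio writeout step. */
static int my_write_folio(struct folio *folio)
{
	/* placeholder: a real filesystem would start writeback here */
	return 0;
}

static int my_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	/* writeback_iter() returns each locked folio due for writeback. */
	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		error = my_write_folio(folio);
		folio_unlock(folio);
	}
	return error;
}
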