Lines matching refs:folio (from include/linux/pagemap.h)
535 struct address_space *folio_mapping(struct folio *);
536 struct address_space *swapcache_mapping(struct folio *);
550 static inline struct address_space *folio_flush_mapping(struct folio *folio)
552         if (unlikely(folio_test_swapcache(folio)))
555         return folio_mapping(folio);
567 static inline struct inode *folio_inode(struct folio *folio)
569         return folio->mapping->host;
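folio_inode() reaches the owning inode through folio->mapping->host, so it is only safe on a pagecache folio whose mapping cannot change underneath the caller, for example while the folio is locked. A minimal sketch of a use; myfs_folio_blocksize() is a hypothetical helper, not from the header:

#include <linux/pagemap.h>

/* Hypothetical helper: block size of the file this pagecache folio
 * belongs to. Assumes folio->mapping is stable (e.g. folio locked). */
static unsigned int myfs_folio_blocksize(struct folio *folio)
{
        struct inode *inode = folio_inode(folio);

        return 1U << inode->i_blkbits;
}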
580 static inline void folio_attach_private(struct folio *folio, void *data)
582         folio_get(folio);
583         folio->private = data;
584         folio_set_private(folio);
598 static inline void *folio_change_private(struct folio *folio, void *data)
600         void *old = folio_get_private(folio);
602         folio->private = data;
615 static inline void *folio_detach_private(struct folio *folio)
617         void *data = folio_get_private(folio);
619         if (!folio_test_private(folio))
621         folio_clear_private(folio);
622         folio->private = NULL;
623         folio_put(folio);
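The attach/detach pair manages both the PG_private flag and a folio reference: folio_attach_private() takes a reference and sets the flag, folio_detach_private() reverses both and hands back the pointer. A sketch of the usual lifecycle in a filesystem; struct myfs_folio_state and the myfs_* helpers are illustrative names only:

#include <linux/pagemap.h>
#include <linux/slab.h>

struct myfs_folio_state {
        unsigned long flags;    /* hypothetical per-folio bookkeeping */
};

/* Attach per-folio state; folio_attach_private() takes a folio
 * reference on our behalf. The folio must be locked. */
static int myfs_init_folio(struct folio *folio)
{
        struct myfs_folio_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;
        folio_attach_private(folio, state);
        return 0;
}

/* Tear down in ->release_folio(): folio_detach_private() clears
 * PG_private, drops the reference taken above and returns the
 * pointer (NULL if nothing was attached) so we can free it. */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
        kfree(folio_detach_private(folio));
        return true;
}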
639 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
641 static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
660 typedef int filler_t(struct file *, struct folio *);
738 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
754 static inline struct folio *filemap_get_folio(struct address_space *mapping,
772 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
790 static inline struct folio *filemap_grab_folio(struct address_space *mapping,
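All three wrappers funnel into __filemap_get_folio() with different FGP flags, and in this kernel a missing folio is reported as an ERR_PTR rather than NULL. A sketch of a locked lookup under that assumption (myfs_lock_folio_at() is hypothetical):

#include <linux/pagemap.h>

/* Sketch: find and lock the folio that covers byte offset @pos.
 * filemap_lock_folio() sleeps for the folio lock and returns
 * ERR_PTR(-ENOENT) if nothing is cached at that index. */
static struct folio *myfs_lock_folio_at(struct address_space *mapping,
                                        loff_t pos)
{
        struct folio *folio = filemap_lock_folio(mapping, pos >> PAGE_SHIFT);

        /* On success the folio is locked and referenced; the caller
         * releases it with folio_unlock() and folio_put(). */
        return folio;
}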
887 extern pgoff_t __folio_swap_cache_index(struct folio *folio);
900 static inline pgoff_t folio_index(struct folio *folio)
902         if (unlikely(folio_test_swapcache(folio)))
903                 return __folio_swap_cache_index(folio);
904         return folio->index;
913 static inline pgoff_t folio_next_index(struct folio *folio)
915         return folio->index + folio_nr_pages(folio);
928 static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
930         return folio_page(folio, index & (folio_nr_pages(folio) - 1));
943 static inline bool folio_contains(struct folio *folio, pgoff_t index)
945         return index - folio_index(folio) < folio_nr_pages(folio);
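folio_next_index() makes iteration over mixed-order folios natural: step by the folio's page count instead of one page at a time. A sketch of a range walk, assuming the caller holds off truncation; myfs_for_each_folio() is hypothetical:

#include <linux/pagemap.h>

/* Sketch: visit every folio overlapping [first, last] in @mapping,
 * skipping a whole large folio in one step. */
static void myfs_for_each_folio(struct address_space *mapping,
                                pgoff_t first, pgoff_t last)
{
        while (first <= last) {
                struct folio *folio = filemap_get_folio(mapping, first);

                if (IS_ERR(folio)) {
                        first++;        /* hole in the page cache */
                        continue;
                }
                /* Sanity: the folio looked up at @first must cover it. */
                WARN_ON(!folio_contains(folio, first));
                first = folio_next_index(folio);
                folio_put(folio);
        }
}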
977 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
979 struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
992 static inline struct folio *read_mapping_folio(struct address_space *mapping,
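read_mapping_folio() wraps read_cache_folio() with the mapping's default ->read_folio() filler: on success it returns an uptodate, unlocked folio with a reference held, otherwise an ERR_PTR. A minimal sketch (myfs_read_folio_at() is hypothetical):

#include <linux/pagemap.h>

/* Sketch: read the folio covering byte offset @pos, blocking until
 * it is uptodate. The caller drops the reference with folio_put(). */
static struct folio *myfs_read_folio_at(struct address_space *mapping,
                                        loff_t pos, struct file *file)
{
        return read_mapping_folio(mapping, pos >> PAGE_SHIFT, file);
}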
1013 static inline pgoff_t page_pgoff(const struct folio *folio,
1016         return folio->index + folio_page_idx(folio, page);
1023 static inline loff_t folio_pos(const struct folio *folio)
1025         return ((loff_t)folio->index) * PAGE_SIZE;
1033         struct folio *folio = page_folio(page);         /* local in page_offset() */
1035         return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
1041 static inline pgoff_t folio_pgoff(struct folio *folio)
1043         return folio->index;
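folio_pos() converts the page-cache index into a byte offset in the file, the usual building block for EOF arithmetic. A sketch that clamps a folio against i_size; myfs_folio_valid_bytes() is a hypothetical helper:

#include <linux/minmax.h>
#include <linux/pagemap.h>

/* Sketch: how many bytes of @folio lie below @i_size. */
static size_t myfs_folio_valid_bytes(struct folio *folio, loff_t i_size)
{
        loff_t pos = folio_pos(folio);

        if (pos >= i_size)
                return 0;
        return min_t(loff_t, i_size - pos, folio_size(folio));
}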
1056         struct folio *folio;            /* member of struct wait_page_key */
1062         struct folio *folio;            /* member of struct wait_page_queue */
1070         if (wait_page->folio != key->folio)             /* in wake_page_match() */
1080 void __folio_lock(struct folio *folio);
1081 int __folio_lock_killable(struct folio *folio);
1082 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
1084 void folio_unlock(struct folio *folio);
1098 static inline bool folio_trylock(struct folio *folio)
1100         return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
1133 static inline void folio_lock(struct folio *folio)
1136         if (!folio_trylock(folio))
1137                 __folio_lock(folio);
1153         struct folio *folio;            /* local in lock_page() */
1156         folio = page_folio(page);
1157         if (!folio_trylock(folio))
1158                 __folio_lock(folio);
1171 static inline int folio_lock_killable(struct folio *folio)
1174         if (!folio_trylock(folio))
1175                 return __folio_lock_killable(folio);
1186 static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
1190         if (!folio_trylock(folio))
1191                 return __folio_lock_or_retry(folio, vmf);
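folio_lock() can sleep, and while it sleeps the folio can be truncated out of the mapping, so callers revalidate folio->mapping once they hold the lock. A sketch of that pattern using the killable variant; the helper name and error-handling choices are illustrative:

#include <linux/pagemap.h>

/* Sketch: lock a pagecache folio and revalidate against truncation.
 * Returns 0 with the folio locked, an error from
 * folio_lock_killable() on a fatal signal, or -EAGAIN if the folio
 * was truncated while we slept. */
static int myfs_lock_and_check(struct folio *folio,
                               struct address_space *mapping)
{
        int err = folio_lock_killable(folio);

        if (err)
                return err;
        if (folio->mapping != mapping) {        /* raced with truncate */
                folio_unlock(folio);
                return -EAGAIN;
        }
        return 0;
}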
1199 void folio_wait_bit(struct folio *folio, int bit_nr);
1200 int folio_wait_bit_killable(struct folio *folio, int bit_nr);
1209 static inline void folio_wait_locked(struct folio *folio)
1211         if (folio_test_locked(folio))
1212                 folio_wait_bit(folio, PG_locked);
1215 static inline int folio_wait_locked_killable(struct folio *folio)
1217         if (!folio_test_locked(folio))
1219         return folio_wait_bit_killable(folio, PG_locked);
1222 void folio_end_read(struct folio *folio, bool success);
1224 void folio_wait_writeback(struct folio *folio);
1225 int folio_wait_writeback_killable(struct folio *folio);
1227 void folio_end_writeback(struct folio *folio);
1228 void folio_wait_stable(struct folio *folio);
1229 void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
1230 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
1231 void __folio_cancel_dirty(struct folio *folio);
1232 static inline void folio_cancel_dirty(struct folio *folio)
1235         if (folio_test_dirty(folio))
1236                 __folio_cancel_dirty(folio);
1238 bool folio_clear_dirty_for_io(struct folio *folio);
1240 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
1241 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
1244 int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
1245 struct folio *src, enum migrate_mode mode);
1249 void folio_end_private_2(struct folio *folio);
1250 void folio_wait_private_2(struct folio *folio);
1251 int folio_wait_private_2_killable(struct folio *folio);
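folio_end_writeback() is the completion side of the folio_wait_writeback() and folio_wait_stable() waits above. A sketch of a write-completion path, recording the error against the mapping with mapping_set_error() so a later fsync() sees it; myfs_write_done() is hypothetical:

#include <linux/pagemap.h>

/* Sketch: finish write I/O on a folio. Clearing the writeback flag
 * wakes folio_wait_writeback() and folio_wait_stable() waiters. */
static void myfs_write_done(struct folio *folio, int err)
{
        if (unlikely(err))
                mapping_set_error(folio->mapping, err);
        folio_end_writeback(folio);
}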
1263 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1265 void filemap_remove_folio(struct folio *folio);
1266 void __filemap_remove_folio(struct folio *folio, void *shadow);
1267 void replace_page_cache_folio(struct folio *old, struct folio *new);
1270 bool filemap_release_folio(struct folio *folio, gfp_t gfp);
1275 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
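filemap_add_folio() inserts a freshly allocated folio, locks it, and puts it on the LRU. A sketch of the allocate-then-insert sequence done by hand (filemap_grab_folio() above does the same in one call); myfs_add_folio() is a hypothetical helper:

#include <linux/pagemap.h>

/* Sketch: allocate an order-0 folio and insert it at @index.
 * On success the folio is locked, referenced and on the LRU. */
static struct folio *myfs_add_folio(struct address_space *mapping,
                                    pgoff_t index)
{
        struct folio *folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
        int err;

        if (!folio)
                return ERR_PTR(-ENOMEM);
        err = filemap_add_folio(mapping, folio, index,
                                mapping_gfp_mask(mapping));
        if (err) {
                folio_put(folio);
                return ERR_PTR(err);
        }
        return folio;
}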
1349 void page_cache_async_ra(struct readahead_control *, struct folio *,
1392                 struct folio *folio, unsigned long req_count)  /* in page_cache_async_readahead() */
1394         DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
1395         page_cache_async_ra(&ractl, folio, req_count);
1398 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
1400         struct folio *folio;
1411         folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
1412         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1413         ractl->_batch_count = folio_nr_pages(folio);
1415         return folio;
1429         struct folio *folio = __readahead_folio(ractl);         /* local in readahead_page() */
1431         return &folio->page;
1442 static inline struct folio *readahead_folio(struct readahead_control *ractl)
1444         struct folio *folio = __readahead_folio(ractl);
1446         if (folio)
1447                 folio_put(folio);
1448         return folio;
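readahead_folio() consumes the next folio in the batch: the folio comes back locked but with the readahead reference already dropped, so a ->readahead() implementation only has to unlock it, typically via folio_end_read() at I/O completion. A sketch, where myfs_start_read() is a hypothetical async submission helper:

#include <linux/pagemap.h>

static bool myfs_start_read(struct folio *folio);      /* hypothetical: queues an async
                                                        * read whose completion handler
                                                        * calls folio_end_read() */

/* Sketch of an address_space ->readahead() method. */
static void myfs_readahead(struct readahead_control *ractl)
{
        struct folio *folio;

        while ((folio = readahead_folio(ractl)) != NULL) {
                if (!myfs_start_read(folio))
                        folio_end_read(folio, false);   /* unlock, leave !uptodate */
        }
}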
1553 static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
1558         size_t offset = offset_in_folio(folio, size);
1560         if (!folio->mapping)
1564         if (folio_next_index(folio) - 1 < index)
1565                 return folio_size(folio);
1567         if (folio->index > index || !offset)
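folio_mkwrite_check_truncate() gives a ->page_mkwrite() handler one call to detect a race with truncate: a negative return means the folio no longer lies inside i_size, a positive return is the number of valid bytes. A simplified sketch of a handler built on it (real implementations also update timestamps, start a journal, etc.); myfs_page_mkwrite() is hypothetical:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch of a vm_operations_struct ->page_mkwrite() method. */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
        struct folio *folio = page_folio(vmf->page);
        struct inode *inode = file_inode(vmf->vma->vm_file);

        folio_lock(folio);
        if (folio_mkwrite_check_truncate(folio, inode) < 0) {
                folio_unlock(folio);            /* truncated under us */
                return VM_FAULT_NOPAGE;
        }
        folio_mark_dirty(folio);
        folio_wait_stable(folio);               /* stable-pages devices */
        return VM_FAULT_LOCKED;
}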
1585 unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
1587         return folio_size(folio) >> inode->i_blkbits;
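With large folios, per-block state no longer has a fixed size, and i_blocks_per_folio() is the scaling factor. A sketch sizing a one-bit-per-block bitmap under a hypothetical layout; myfs_block_bitmap_size() is not from the header:

#include <linux/bitops.h>
#include <linux/pagemap.h>

/* Sketch: bytes needed for a bitmap with one bit per fs block. */
static unsigned long myfs_block_bitmap_size(struct inode *inode,
                                            struct folio *folio)
{
        return BITS_TO_LONGS(i_blocks_per_folio(inode, folio)) *
               sizeof(unsigned long);
}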