xref: /linux-6.15/include/linux/pagemap.h (revision 6f75cd16)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_PAGEMAP_H
3 #define _LINUX_PAGEMAP_H
4 
5 /*
6  * Copyright 1995 Linus Torvalds
7  */
8 #include <linux/mm.h>
9 #include <linux/fs.h>
10 #include <linux/list.h>
11 #include <linux/highmem.h>
12 #include <linux/compiler.h>
13 #include <linux/uaccess.h>
14 #include <linux/gfp.h>
15 #include <linux/bitops.h>
16 #include <linux/hardirq.h> /* for in_interrupt() */
17 #include <linux/hugetlb_inline.h>
18 
19 struct folio_batch;
20 
21 unsigned long invalidate_mapping_pages(struct address_space *mapping,
22 					pgoff_t start, pgoff_t end);
23 
24 static inline void invalidate_remote_inode(struct inode *inode)
25 {
26 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
27 	    S_ISLNK(inode->i_mode))
28 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
29 }
30 int invalidate_inode_pages2(struct address_space *mapping);
31 int invalidate_inode_pages2_range(struct address_space *mapping,
32 		pgoff_t start, pgoff_t end);
33 int write_inode_now(struct inode *, int sync);
34 int filemap_fdatawrite(struct address_space *);
35 int filemap_flush(struct address_space *);
36 int filemap_fdatawait_keep_errors(struct address_space *mapping);
37 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
38 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
39 		loff_t start_byte, loff_t end_byte);
40 
41 static inline int filemap_fdatawait(struct address_space *mapping)
42 {
43 	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
44 }
45 
46 bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
47 int filemap_write_and_wait_range(struct address_space *mapping,
48 		loff_t lstart, loff_t lend);
49 int __filemap_fdatawrite_range(struct address_space *mapping,
50 		loff_t start, loff_t end, int sync_mode);
51 int filemap_fdatawrite_range(struct address_space *mapping,
52 		loff_t start, loff_t end);
53 int filemap_check_errors(struct address_space *mapping);
54 void __filemap_set_wb_err(struct address_space *mapping, int err);
55 int filemap_fdatawrite_wbc(struct address_space *mapping,
56 			   struct writeback_control *wbc);
57 
58 static inline int filemap_write_and_wait(struct address_space *mapping)
59 {
60 	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
61 }
62 
63 /**
64  * filemap_set_wb_err - set a writeback error on an address_space
65  * @mapping: mapping in which to set writeback error
66  * @err: error to be set in mapping
67  *
68  * When writeback fails in some way, we must record that error so that
69  * userspace can be informed when fsync and the like are called.  We endeavor
70  * to report errors on any file that was open at the time of the error.  Some
71  * internal callers also need to know when writeback errors have occurred.
72  *
73  * When a writeback error occurs, most filesystems will want to call
74  * filemap_set_wb_err to record the error in the mapping so that it will be
75  * automatically reported whenever fsync is called on the file.
76  */
77 static inline void filemap_set_wb_err(struct address_space *mapping, int err)
78 {
79 	/* Fastpath for common case of no error */
80 	if (unlikely(err))
81 		__filemap_set_wb_err(mapping, err);
82 }
83 
84 /**
85  * filemap_check_wb_err - has an error occurred since the mark was sampled?
86  * @mapping: mapping to check for writeback errors
87  * @since: previously-sampled errseq_t
88  *
89  * Grab the errseq_t value from the mapping, and see if it has changed "since"
90  * the given value was sampled.
91  *
92  * If it has then report the latest error set, otherwise return 0.
93  */
94 static inline int filemap_check_wb_err(struct address_space *mapping,
95 					errseq_t since)
96 {
97 	return errseq_check(&mapping->wb_err, since);
98 }
99 
100 /**
101  * filemap_sample_wb_err - sample the current errseq_t to test for later errors
102  * @mapping: mapping to be sampled
103  *
104  * Writeback errors are always reported relative to a particular sample point
105  * in the past. This function provides those sample points.
106  */
107 static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
108 {
109 	return errseq_sample(&mapping->wb_err);
110 }
111 
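/*
 * Usage sketch (editor's illustration, not part of this header): a caller
 * that wants "did writeback fail since I last looked?" semantics samples
 * the errseq_t first, writes, then checks against the sample.  The function
 * name and the direct use of file->f_mapping are assumptions for the example.
 */
static inline int example_write_and_check(struct file *file)
{
	struct address_space *mapping = file->f_mapping;
	errseq_t since = filemap_sample_wb_err(mapping);
	int err;

	err = filemap_write_and_wait(mapping);
	if (err)
		return err;

	/* Report any writeback error recorded since the sample point. */
	return filemap_check_wb_err(mapping, since);
}
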
112 /**
113  * file_sample_sb_err - sample the current errseq_t to test for later errors
114  * @file: file pointer to be sampled
115  *
116  * Grab the most current superblock-level errseq_t value for the given
117  * struct file.
118  */
119 static inline errseq_t file_sample_sb_err(struct file *file)
120 {
121 	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
122 }
123 
124 /*
125  * Flush file data before changing attributes.  Caller must hold any locks
126  * required to prevent further writes to this file until we're done setting
127  * flags.
128  */
129 static inline int inode_drain_writes(struct inode *inode)
130 {
131 	inode_dio_wait(inode);
132 	return filemap_write_and_wait(inode->i_mapping);
133 }
134 
135 static inline bool mapping_empty(struct address_space *mapping)
136 {
137 	return xa_empty(&mapping->i_pages);
138 }
139 
140 /*
141  * mapping_shrinkable - test if page cache state allows inode reclaim
142  * @mapping: the page cache mapping
143  *
144  * This checks the mapping's cache state for the purpose of inode
145  * reclaim and LRU management.
146  *
147  * The caller is expected to hold the i_lock, but is not required to
148  * hold the i_pages lock, which usually protects cache state. That's
149  * because the i_lock and the list_lru lock that protect the inode and
150  * its LRU state don't nest inside the irq-safe i_pages lock.
151  *
152  * Cache deletions are performed under the i_lock, which ensures that
153  * when an inode goes empty, it will reliably get queued on the LRU.
154  *
155  * Cache additions do not acquire the i_lock and may race with this
156  * check, in which case we'll report the inode as shrinkable when it
157  * has cache pages. This is okay: the shrinker also checks the
158  * refcount and the referenced bit, which will be elevated or set in
159  * the process of adding new cache pages to an inode.
160  */
161 static inline bool mapping_shrinkable(struct address_space *mapping)
162 {
163 	void *head;
164 
165 	/*
166 	 * On highmem systems, there could be lowmem pressure from the
167 	 * inodes before there is highmem pressure from the page
168 	 * cache. Make inodes shrinkable regardless of cache state.
169 	 */
170 	if (IS_ENABLED(CONFIG_HIGHMEM))
171 		return true;
172 
173 	/* Cache completely empty? Shrink away. */
174 	head = rcu_access_pointer(mapping->i_pages.xa_head);
175 	if (!head)
176 		return true;
177 
178 	/*
179 	 * The xarray stores single offset-0 entries directly in the
180 	 * head pointer, which allows non-resident page cache entries
181 	 * to escape the shadow shrinker's list of xarray nodes. The
182 	 * inode shrinker needs to pick them up under memory pressure.
183 	 */
184 	if (!xa_is_node(head) && xa_is_value(head))
185 		return true;
186 
187 	return false;
188 }
189 
190 /*
191  * Bits in mapping->flags.
192  */
193 enum mapping_flags {
194 	AS_EIO		= 0,	/* IO error on async write */
195 	AS_ENOSPC	= 1,	/* ENOSPC on async write */
196 	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
197 	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
198 	AS_EXITING	= 4, 	/* final truncate in progress */
199 	/* writeback related tags are not used */
200 	AS_NO_WRITEBACK_TAGS = 5,
201 	AS_LARGE_FOLIO_SUPPORT = 6,
202 };
203 
204 /**
205  * mapping_set_error - record a writeback error in the address_space
206  * @mapping: the mapping in which an error should be set
207  * @error: the error to set in the mapping
208  *
209  * When writeback fails in some way, we must record that error so that
210  * userspace can be informed when fsync and the like are called.  We endeavor
211  * to report errors on any file that was open at the time of the error.  Some
212  * internal callers also need to know when writeback errors have occurred.
213  *
214  * When a writeback error occurs, most filesystems will want to call
215  * mapping_set_error to record the error in the mapping so that it can be
216  * reported when the application calls fsync(2).
217  */
218 static inline void mapping_set_error(struct address_space *mapping, int error)
219 {
220 	if (likely(!error))
221 		return;
222 
223 	/* Record in wb_err for checkers using errseq_t based tracking */
224 	__filemap_set_wb_err(mapping, error);
225 
226 	/* Record it in superblock */
227 	if (mapping->host)
228 		errseq_set(&mapping->host->i_sb->s_wb_err, error);
229 
230 	/* Record it in flags for now, for legacy callers */
231 	if (error == -ENOSPC)
232 		set_bit(AS_ENOSPC, &mapping->flags);
233 	else
234 		set_bit(AS_EIO, &mapping->flags);
235 }
236 
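/*
 * Usage sketch (editor's illustration, not part of this header): a
 * filesystem's write-completion path typically records an I/O failure like
 * this so that a later fsync() on any open file sees it.  The function name
 * is hypothetical; folio_end_writeback() is declared further down this file.
 */
static inline void my_fs_end_folio_write(struct folio *folio, int err)
{
	if (unlikely(err))
		mapping_set_error(folio->mapping, err);
	folio_end_writeback(folio);
}
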
237 static inline void mapping_set_unevictable(struct address_space *mapping)
238 {
239 	set_bit(AS_UNEVICTABLE, &mapping->flags);
240 }
241 
242 static inline void mapping_clear_unevictable(struct address_space *mapping)
243 {
244 	clear_bit(AS_UNEVICTABLE, &mapping->flags);
245 }
246 
247 static inline bool mapping_unevictable(struct address_space *mapping)
248 {
249 	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
250 }
251 
252 static inline void mapping_set_exiting(struct address_space *mapping)
253 {
254 	set_bit(AS_EXITING, &mapping->flags);
255 }
256 
257 static inline int mapping_exiting(struct address_space *mapping)
258 {
259 	return test_bit(AS_EXITING, &mapping->flags);
260 }
261 
262 static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
263 {
264 	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
265 }
266 
267 static inline int mapping_use_writeback_tags(struct address_space *mapping)
268 {
269 	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
270 }
271 
272 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
273 {
274 	return mapping->gfp_mask;
275 }
276 
277 /* Restricts the given gfp_mask to what the mapping allows. */
278 static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
279 		gfp_t gfp_mask)
280 {
281 	return mapping_gfp_mask(mapping) & gfp_mask;
282 }
283 
284 /*
285  * This is non-atomic.  Only to be used before the mapping is activated.
286  * Probably needs a barrier...
287  */
288 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
289 {
290 	m->gfp_mask = mask;
291 }
292 
293 /**
294  * mapping_set_large_folios() - Indicate the file supports large folios.
295  * @mapping: The file.
296  *
297  * The filesystem should call this function in its inode constructor to
298  * indicate that the VFS can use large folios to cache the contents of
299  * the file.
300  *
301  * Context: This should not be called while the inode is active as it
302  * is non-atomic.
303  */
304 static inline void mapping_set_large_folios(struct address_space *mapping)
305 {
306 	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
307 }
308 
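/*
 * Usage sketch (editor's illustration, not part of this header): a
 * filesystem opting in to large folios from its inode-initialisation path,
 * before the mapping is active.  The function name is hypothetical.
 */
static inline void my_fs_init_mapping(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE);
	mapping_set_large_folios(inode->i_mapping);
}
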
309 /*
310  * Large folio support currently depends on THP.  These dependencies are
311  * being worked on but are not yet fixed.
312  */
313 static inline bool mapping_large_folio_support(struct address_space *mapping)
314 {
315 	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
316 		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
317 }
318 
319 static inline int filemap_nr_thps(struct address_space *mapping)
320 {
321 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
322 	return atomic_read(&mapping->nr_thps);
323 #else
324 	return 0;
325 #endif
326 }
327 
328 static inline void filemap_nr_thps_inc(struct address_space *mapping)
329 {
330 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
331 	if (!mapping_large_folio_support(mapping))
332 		atomic_inc(&mapping->nr_thps);
333 #else
334 	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
335 #endif
336 }
337 
338 static inline void filemap_nr_thps_dec(struct address_space *mapping)
339 {
340 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
341 	if (!mapping_large_folio_support(mapping))
342 		atomic_dec(&mapping->nr_thps);
343 #else
344 	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
345 #endif
346 }
347 
348 struct address_space *page_mapping(struct page *);
349 struct address_space *folio_mapping(struct folio *);
350 struct address_space *swapcache_mapping(struct folio *);
351 
352 /**
353  * folio_file_mapping - Find the mapping this folio belongs to.
354  * @folio: The folio.
355  *
356  * For folios which are in the page cache, return the mapping that this
357  * folio belongs to.  Folios in the swap cache return the mapping of the
358  * swap file or swap device where the data is stored.  This is different
359  * from the mapping returned by folio_mapping().  The only reason to
360  * use it is if, like NFS, you return 0 from ->activate_swapfile.
361  *
362  * Do not call this for folios which aren't in the page cache or swap cache.
363  */
364 static inline struct address_space *folio_file_mapping(struct folio *folio)
365 {
366 	if (unlikely(folio_test_swapcache(folio)))
367 		return swapcache_mapping(folio);
368 
369 	return folio->mapping;
370 }
371 
372 static inline struct address_space *page_file_mapping(struct page *page)
373 {
374 	return folio_file_mapping(page_folio(page));
375 }
376 
377 /*
378  * For file cache pages, return the address_space, otherwise return NULL
379  */
380 static inline struct address_space *page_mapping_file(struct page *page)
381 {
382 	struct folio *folio = page_folio(page);
383 
384 	if (unlikely(folio_test_swapcache(folio)))
385 		return NULL;
386 	return folio_mapping(folio);
387 }
388 
389 /**
390  * folio_inode - Get the host inode for this folio.
391  * @folio: The folio.
392  *
393  * For folios which are in the page cache, return the inode that this folio
394  * belongs to.
395  *
396  * Do not call this for folios which aren't in the page cache.
397  */
398 static inline struct inode *folio_inode(struct folio *folio)
399 {
400 	return folio->mapping->host;
401 }
402 
403 /**
404  * folio_attach_private - Attach private data to a folio.
405  * @folio: Folio to attach data to.
406  * @data: Data to attach to folio.
407  *
408  * Attaching private data to a folio increments the folio's reference count.
409  * The data must be detached before the folio will be freed.
410  */
411 static inline void folio_attach_private(struct folio *folio, void *data)
412 {
413 	folio_get(folio);
414 	folio->private = data;
415 	folio_set_private(folio);
416 }
417 
418 /**
419  * folio_change_private - Change private data on a folio.
420  * @folio: Folio to change the data on.
421  * @data: Data to set on the folio.
422  *
423  * Change the private data attached to a folio and return the old
424  * data.  The folio must previously have had data attached and the data
425  * must be detached before the folio will be freed.
426  *
427  * Return: Data that was previously attached to the folio.
428  */
429 static inline void *folio_change_private(struct folio *folio, void *data)
430 {
431 	void *old = folio_get_private(folio);
432 
433 	folio->private = data;
434 	return old;
435 }
436 
437 /**
438  * folio_detach_private - Detach private data from a folio.
439  * @folio: Folio to detach data from.
440  *
441  * Removes the data that was previously attached to the folio and decrements
442  * the refcount on the folio.
443  *
444  * Return: Data that was attached to the folio.
445  */
446 static inline void *folio_detach_private(struct folio *folio)
447 {
448 	void *data = folio_get_private(folio);
449 
450 	if (!folio_test_private(folio))
451 		return NULL;
452 	folio_clear_private(folio);
453 	folio->private = NULL;
454 	folio_put(folio);
455 
456 	return data;
457 }
458 
459 static inline void attach_page_private(struct page *page, void *data)
460 {
461 	folio_attach_private(page_folio(page), data);
462 }
463 
464 static inline void *detach_page_private(struct page *page)
465 {
466 	return folio_detach_private(page_folio(page));
467 }
468 
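/*
 * Usage sketch (editor's illustration, not part of this header): typical
 * lifecycle of per-folio filesystem state.  "struct my_fs_private" and the
 * function names are hypothetical.
 */
struct my_fs_private {
	unsigned long state;
};

static inline void my_fs_init_folio(struct folio *folio,
				     struct my_fs_private *priv)
{
	/* Takes a folio reference; dropped again by folio_detach_private(). */
	folio_attach_private(folio, priv);
}

static inline struct my_fs_private *my_fs_teardown_folio(struct folio *folio)
{
	/* Returns the attached data (or NULL) and drops the reference. */
	return folio_detach_private(folio);
}
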
469 #ifdef CONFIG_NUMA
470 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
471 #else
472 static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
473 {
474 	return folio_alloc(gfp, order);
475 }
476 #endif
477 
478 static inline struct page *__page_cache_alloc(gfp_t gfp)
479 {
480 	return &filemap_alloc_folio(gfp, 0)->page;
481 }
482 
483 static inline struct page *page_cache_alloc(struct address_space *x)
484 {
485 	return __page_cache_alloc(mapping_gfp_mask(x));
486 }
487 
488 static inline gfp_t readahead_gfp_mask(struct address_space *x)
489 {
490 	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
491 }
492 
493 typedef int filler_t(struct file *, struct folio *);
494 
495 pgoff_t page_cache_next_miss(struct address_space *mapping,
496 			     pgoff_t index, unsigned long max_scan);
497 pgoff_t page_cache_prev_miss(struct address_space *mapping,
498 			     pgoff_t index, unsigned long max_scan);
499 
500 #define FGP_ACCESSED		0x00000001
501 #define FGP_LOCK		0x00000002
502 #define FGP_CREAT		0x00000004
503 #define FGP_WRITE		0x00000008
504 #define FGP_NOFS		0x00000010
505 #define FGP_NOWAIT		0x00000020
506 #define FGP_FOR_MMAP		0x00000040
507 #define FGP_ENTRY		0x00000080
508 #define FGP_STABLE		0x00000100
509 
510 #define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
511 
512 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
513 		int fgp_flags, gfp_t gfp);
514 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
515 		int fgp_flags, gfp_t gfp);
516 
517 /**
518  * filemap_get_folio - Find and get a folio.
519  * @mapping: The address_space to search.
520  * @index: The page index.
521  *
522  * Looks up the page cache entry at @mapping & @index.  If a folio is
523  * present, it is returned with an increased refcount.
524  *
525  * Otherwise, %NULL is returned.
526  */
527 static inline struct folio *filemap_get_folio(struct address_space *mapping,
528 					pgoff_t index)
529 {
530 	return __filemap_get_folio(mapping, index, 0, 0);
531 }
532 
533 /**
534  * filemap_lock_folio - Find and lock a folio.
535  * @mapping: The address_space to search.
536  * @index: The page index.
537  *
538  * Looks up the page cache entry at @mapping & @index.  If a folio is
539  * present, it is returned locked with an increased refcount.
540  *
541  * Context: May sleep.
542  * Return: A folio or %NULL if there is no folio in the cache for this
543  * index.  Will not return a shadow, swap or DAX entry.
544  */
545 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
546 					pgoff_t index)
547 {
548 	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
549 }
550 
551 /**
552  * filemap_grab_folio - grab a folio from the page cache
553  * @mapping: The address space to search
554  * @index: The page index
555  *
556  * Looks up the page cache entry at @mapping & @index. If no folio is found,
557  * a new folio is created. The folio is locked, marked as accessed, and
558  * returned.
559  *
560  * Return: A found or created folio, or %NULL if no folio was found and
561  * one could not be created.
562  */
563 static inline struct folio *filemap_grab_folio(struct address_space *mapping,
564 					pgoff_t index)
565 {
566 	return __filemap_get_folio(mapping, index,
567 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
568 			mapping_gfp_mask(mapping));
569 }
570 
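/*
 * Usage sketch (editor's illustration, not part of this header): the
 * lookup/put refcount discipline for filemap_get_folio().  The function
 * name is hypothetical.
 */
static inline bool my_fs_folio_cached_uptodate(struct address_space *mapping,
					       pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);
	bool uptodate;

	if (!folio)
		return false;
	uptodate = folio_test_uptodate(folio);
	folio_put(folio);
	return uptodate;
}
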
571 /**
572  * find_get_page - find and get a page reference
573  * @mapping: the address_space to search
574  * @offset: the page index
575  *
576  * Looks up the page cache slot at @mapping & @offset.  If there is a
577  * page cache page, it is returned with an increased refcount.
578  *
579  * Otherwise, %NULL is returned.
580  */
581 static inline struct page *find_get_page(struct address_space *mapping,
582 					pgoff_t offset)
583 {
584 	return pagecache_get_page(mapping, offset, 0, 0);
585 }
586 
587 static inline struct page *find_get_page_flags(struct address_space *mapping,
588 					pgoff_t offset, int fgp_flags)
589 {
590 	return pagecache_get_page(mapping, offset, fgp_flags, 0);
591 }
592 
593 /**
594  * find_lock_page - locate, pin and lock a pagecache page
595  * @mapping: the address_space to search
596  * @index: the page index
597  *
598  * Looks up the page cache entry at @mapping & @index.  If there is a
599  * page cache page, it is returned locked and with an increased
600  * refcount.
601  *
602  * Context: May sleep.
603  * Return: A struct page or %NULL if there is no page in the cache for this
604  * index.
605  */
606 static inline struct page *find_lock_page(struct address_space *mapping,
607 					pgoff_t index)
608 {
609 	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
610 }
611 
612 /**
613  * find_or_create_page - locate or add a pagecache page
614  * @mapping: the page's address_space
615  * @index: the page's index into the mapping
616  * @gfp_mask: page allocation mode
617  *
618  * Looks up the page cache slot at @mapping & @index.  If there is a
619  * page cache page, it is returned locked and with an increased
620  * refcount.
621  *
622  * If the page is not present, a new page is allocated using @gfp_mask
623  * and added to the page cache and the VM's LRU list.  The page is
624  * returned locked and with an increased refcount.
625  *
626  * On memory exhaustion, %NULL is returned.
627  *
628  * find_or_create_page() may sleep, even if @gfp_mask specifies an
629  * atomic allocation!
630  */
631 static inline struct page *find_or_create_page(struct address_space *mapping,
632 					pgoff_t index, gfp_t gfp_mask)
633 {
634 	return pagecache_get_page(mapping, index,
635 					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
636 					gfp_mask);
637 }
638 
639 /**
640  * grab_cache_page_nowait - returns locked page at given index in given cache
641  * @mapping: target address_space
642  * @index: the page index
643  *
644  * Same as grab_cache_page(), but do not wait if the page is unavailable.
645  * This is intended for speculative data generators, where the data can
646  * be regenerated if the page couldn't be grabbed.  This routine should
647  * be safe to call while holding the lock for another page.
648  *
649  * Clear __GFP_FS when allocating the page to avoid recursion into the fs
650  * and deadlock against the caller's locked page.
651  */
652 static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
653 				pgoff_t index)
654 {
655 	return pagecache_get_page(mapping, index,
656 			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
657 			mapping_gfp_mask(mapping));
658 }
659 
660 #define swapcache_index(folio)	__page_file_index(&(folio)->page)
661 
662 /**
663  * folio_index - File index of a folio.
664  * @folio: The folio.
665  *
666  * For a folio which is either in the page cache or the swap cache,
667  * return its index within the address_space it belongs to.  If you know
668  * the page is definitely in the page cache, you can look at the folio's
669  * index directly.
670  *
671  * Return: The index (offset in units of pages) of a folio in its file.
672  */
673 static inline pgoff_t folio_index(struct folio *folio)
674 {
675 	if (unlikely(folio_test_swapcache(folio)))
676 		return swapcache_index(folio);
677 	return folio->index;
678 }
679 
680 /**
681  * folio_next_index - Get the index of the next folio.
682  * @folio: The current folio.
683  *
684  * Return: The index of the folio which follows this folio in the file.
685  */
686 static inline pgoff_t folio_next_index(struct folio *folio)
687 {
688 	return folio->index + folio_nr_pages(folio);
689 }
690 
691 /**
692  * folio_file_page - The page for a particular index.
693  * @folio: The folio which contains this index.
694  * @index: The index we want to look up.
695  *
696  * Sometimes after looking up a folio in the page cache, we need to
697  * obtain the specific page for an index (eg a page fault).
698  *
699  * Return: The page containing the file data for this index.
700  */
701 static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
702 {
703 	/* HugeTLBfs indexes the page cache in units of hpage_size */
704 	if (folio_test_hugetlb(folio))
705 		return &folio->page;
706 	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
707 }
708 
709 /**
710  * folio_contains - Does this folio contain this index?
711  * @folio: The folio.
712  * @index: The page index within the file.
713  *
714  * Context: The caller should have the page locked in order to prevent
715  * (eg) shmem from moving the page between the page cache and swap cache
716  * and changing its index in the middle of the operation.
717  * Return: true or false.
718  */
719 static inline bool folio_contains(struct folio *folio, pgoff_t index)
720 {
721 	/* HugeTLBfs indexes the page cache in units of hpage_size */
722 	if (folio_test_hugetlb(folio))
723 		return folio->index == index;
724 	return index - folio_index(folio) < folio_nr_pages(folio);
725 }
726 
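/*
 * Usage sketch (editor's illustration, not part of this header): after
 * looking up a (possibly large) folio, map a file index back to the exact
 * page inside it, guarding against a racing truncation.  Name is hypothetical.
 */
static inline struct page *my_fs_page_for_index(struct folio *folio,
						pgoff_t index)
{
	if (!folio_contains(folio, index))
		return NULL;
	return folio_file_page(folio, index);
}
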
727 /*
728  * Given the page we found in the page cache, return the page corresponding
729  * to this index in the file
730  */
731 static inline struct page *find_subpage(struct page *head, pgoff_t index)
732 {
733 	/* HugeTLBfs wants the head page regardless */
734 	if (PageHuge(head))
735 		return head;
736 
737 	return head + (index & (thp_nr_pages(head) - 1));
738 }
739 
740 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
741 		pgoff_t end, struct folio_batch *fbatch);
742 unsigned filemap_get_folios_contig(struct address_space *mapping,
743 		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
744 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
745 		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
746 
747 struct page *grab_cache_page_write_begin(struct address_space *mapping,
748 			pgoff_t index);
749 
750 /*
751  * Returns locked page at given index in given cache, creating it if needed.
752  */
753 static inline struct page *grab_cache_page(struct address_space *mapping,
754 								pgoff_t index)
755 {
756 	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
757 }
758 
759 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
760 		filler_t *filler, struct file *file);
761 struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
762 		gfp_t flags);
763 struct page *read_cache_page(struct address_space *, pgoff_t index,
764 		filler_t *filler, struct file *file);
765 extern struct page * read_cache_page_gfp(struct address_space *mapping,
766 				pgoff_t index, gfp_t gfp_mask);
767 
768 static inline struct page *read_mapping_page(struct address_space *mapping,
769 				pgoff_t index, struct file *file)
770 {
771 	return read_cache_page(mapping, index, NULL, file);
772 }
773 
774 static inline struct folio *read_mapping_folio(struct address_space *mapping,
775 				pgoff_t index, struct file *file)
776 {
777 	return read_cache_folio(mapping, index, NULL, file);
778 }
779 
780 /*
781  * Get index of the page within radix-tree (but not for hugetlb pages).
782  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
783  */
784 static inline pgoff_t page_to_index(struct page *page)
785 {
786 	struct page *head;
787 
788 	if (likely(!PageTransTail(page)))
789 		return page->index;
790 
791 	head = compound_head(page);
792 	/*
793 	 *  We don't initialize ->index for tail pages: calculate based on
794 	 *  head page
795 	 */
796 	return head->index + page - head;
797 }
798 
799 extern pgoff_t hugetlb_basepage_index(struct page *page);
800 
801 /*
802  * Get the offset in PAGE_SIZE (even for hugetlb pages).
803  * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
804  */
805 static inline pgoff_t page_to_pgoff(struct page *page)
806 {
807 	if (unlikely(PageHuge(page)))
808 		return hugetlb_basepage_index(page);
809 	return page_to_index(page);
810 }
811 
812 /*
813  * Return byte-offset into filesystem object for page.
814  */
815 static inline loff_t page_offset(struct page *page)
816 {
817 	return ((loff_t)page->index) << PAGE_SHIFT;
818 }
819 
820 static inline loff_t page_file_offset(struct page *page)
821 {
822 	return ((loff_t)page_index(page)) << PAGE_SHIFT;
823 }
824 
825 /**
826  * folio_pos - Returns the byte position of this folio in its file.
827  * @folio: The folio.
828  */
829 static inline loff_t folio_pos(struct folio *folio)
830 {
831 	return page_offset(&folio->page);
832 }
833 
834 /**
835  * folio_file_pos - Returns the byte position of this folio in its file.
836  * @folio: The folio.
837  *
838  * This differs from folio_pos() for folios which belong to a swap file.
839  * NFS is the only filesystem today which needs to use folio_file_pos().
840  */
841 static inline loff_t folio_file_pos(struct folio *folio)
842 {
843 	return page_file_offset(&folio->page);
844 }
845 
846 /*
847  * Get the offset in PAGE_SIZE (even for hugetlb folios).
848  * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
849  */
850 static inline pgoff_t folio_pgoff(struct folio *folio)
851 {
852 	if (unlikely(folio_test_hugetlb(folio)))
853 		return hugetlb_basepage_index(&folio->page);
854 	return folio->index;
855 }
856 
857 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
858 				     unsigned long address);
859 
860 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
861 					unsigned long address)
862 {
863 	pgoff_t pgoff;
864 	if (unlikely(is_vm_hugetlb_page(vma)))
865 		return linear_hugepage_index(vma, address);
866 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
867 	pgoff += vma->vm_pgoff;
868 	return pgoff;
869 }
870 
871 struct wait_page_key {
872 	struct folio *folio;
873 	int bit_nr;
874 	int page_match;
875 };
876 
877 struct wait_page_queue {
878 	struct folio *folio;
879 	int bit_nr;
880 	wait_queue_entry_t wait;
881 };
882 
883 static inline bool wake_page_match(struct wait_page_queue *wait_page,
884 				  struct wait_page_key *key)
885 {
886 	if (wait_page->folio != key->folio)
887 	       return false;
888 	key->page_match = 1;
889 
890 	if (wait_page->bit_nr != key->bit_nr)
891 		return false;
892 
893 	return true;
894 }
895 
896 void __folio_lock(struct folio *folio);
897 int __folio_lock_killable(struct folio *folio);
898 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
899 				unsigned int flags);
900 void unlock_page(struct page *page);
901 void folio_unlock(struct folio *folio);
902 
903 /**
904  * folio_trylock() - Attempt to lock a folio.
905  * @folio: The folio to attempt to lock.
906  *
907  * Sometimes it is undesirable to wait for a folio to be unlocked (eg
908  * when the locks are being taken in the wrong order, or if making
909  * progress through a batch of folios is more important than processing
910  * them in order).  Usually folio_lock() is the correct function to call.
911  *
912  * Context: Any context.
913  * Return: Whether the lock was successfully acquired.
914  */
915 static inline bool folio_trylock(struct folio *folio)
916 {
917 	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
918 }
919 
920 /*
921  * Return true if the page was successfully locked
922  */
923 static inline int trylock_page(struct page *page)
924 {
925 	return folio_trylock(page_folio(page));
926 }
927 
928 /**
929  * folio_lock() - Lock this folio.
930  * @folio: The folio to lock.
931  *
932  * The folio lock protects against many things, probably more than it
933  * should.  It is primarily held while a folio is being brought uptodate,
934  * either from its backing file or from swap.  It is also held while a
935  * folio is being truncated from its address_space, so holding the lock
936  * is sufficient to keep folio->mapping stable.
937  *
938  * The folio lock is also held while write() is modifying the page to
939  * provide POSIX atomicity guarantees (as long as the write does not
940  * cross a page boundary).  Other modifications to the data in the folio
941  * do not hold the folio lock and can race with writes, eg DMA and stores
942  * to mapped pages.
943  *
944  * Context: May sleep.  If you need to acquire the locks of two or
945  * more folios, they must be in order of ascending index, if they are
946  * in the same address_space.  If they are in different address_spaces,
947  * acquire the lock of the folio which belongs to the address_space which
948  * has the lowest address in memory first.
949  */
950 static inline void folio_lock(struct folio *folio)
951 {
952 	might_sleep();
953 	if (!folio_trylock(folio))
954 		__folio_lock(folio);
955 }
956 
957 /**
958  * lock_page() - Lock the folio containing this page.
959  * @page: The page to lock.
960  *
961  * See folio_lock() for a description of what the lock protects.
962  * This is a legacy function and new code should probably use folio_lock()
963  * instead.
964  *
965  * Context: May sleep.  Pages in the same folio share a lock, so do not
966  * attempt to lock two pages which share a folio.
967  */
968 static inline void lock_page(struct page *page)
969 {
970 	struct folio *folio;
971 	might_sleep();
972 
973 	folio = page_folio(page);
974 	if (!folio_trylock(folio))
975 		__folio_lock(folio);
976 }
977 
978 /**
979  * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
980  * @folio: The folio to lock.
981  *
982  * Attempts to lock the folio, like folio_lock(), except that the sleep
983  * to acquire the lock is interruptible by a fatal signal.
984  *
985  * Context: May sleep; see folio_lock().
986  * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
987  */
988 static inline int folio_lock_killable(struct folio *folio)
989 {
990 	might_sleep();
991 	if (!folio_trylock(folio))
992 		return __folio_lock_killable(folio);
993 	return 0;
994 }
995 
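/*
 * Usage sketch (editor's illustration, not part of this header): the
 * lock/check/unlock pattern.  Holding the folio lock keeps folio->mapping
 * stable, so truncation can be detected safely.  Name is hypothetical.
 */
static inline bool my_fs_folio_still_cached(struct folio *folio,
					    struct address_space *mapping)
{
	bool valid;

	folio_lock(folio);
	valid = folio->mapping == mapping;
	folio_unlock(folio);

	return valid;
}
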
996 /*
997  * folio_lock_or_retry - Lock the folio, unless this would block and the
998  * caller indicated that it can handle a retry.
999  *
1000  * Return value and mmap_lock implications depend on flags; see
1001  * __folio_lock_or_retry().
1002  */
1003 static inline bool folio_lock_or_retry(struct folio *folio,
1004 		struct mm_struct *mm, unsigned int flags)
1005 {
1006 	might_sleep();
1007 	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
1008 }
1009 
1010 /*
1011  * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
1012  * and should not be used directly.
1013  */
1014 void folio_wait_bit(struct folio *folio, int bit_nr);
1015 int folio_wait_bit_killable(struct folio *folio, int bit_nr);
1016 
1017 /*
1018  * Wait for a folio to be unlocked.
1019  *
1020  * This must be called with the caller "holding" the folio,
1021  * ie with increased folio reference count so that the folio won't
1022  * go away during the wait.
1023  */
1024 static inline void folio_wait_locked(struct folio *folio)
1025 {
1026 	if (folio_test_locked(folio))
1027 		folio_wait_bit(folio, PG_locked);
1028 }
1029 
1030 static inline int folio_wait_locked_killable(struct folio *folio)
1031 {
1032 	if (!folio_test_locked(folio))
1033 		return 0;
1034 	return folio_wait_bit_killable(folio, PG_locked);
1035 }
1036 
1037 static inline void wait_on_page_locked(struct page *page)
1038 {
1039 	folio_wait_locked(page_folio(page));
1040 }
1041 
1042 static inline int wait_on_page_locked_killable(struct page *page)
1043 {
1044 	return folio_wait_locked_killable(page_folio(page));
1045 }
1046 
1047 void wait_on_page_writeback(struct page *page);
1048 void folio_wait_writeback(struct folio *folio);
1049 int folio_wait_writeback_killable(struct folio *folio);
1050 void end_page_writeback(struct page *page);
1051 void folio_end_writeback(struct folio *folio);
1052 void wait_for_stable_page(struct page *page);
1053 void folio_wait_stable(struct folio *folio);
1054 void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
1055 static inline void __set_page_dirty(struct page *page,
1056 		struct address_space *mapping, int warn)
1057 {
1058 	__folio_mark_dirty(page_folio(page), mapping, warn);
1059 }
1060 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
1061 void __folio_cancel_dirty(struct folio *folio);
1062 static inline void folio_cancel_dirty(struct folio *folio)
1063 {
1064 	/* Avoid atomic ops, locking, etc. when not actually needed. */
1065 	if (folio_test_dirty(folio))
1066 		__folio_cancel_dirty(folio);
1067 }
1068 bool folio_clear_dirty_for_io(struct folio *folio);
1069 bool clear_page_dirty_for_io(struct page *page);
1070 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
1071 int __set_page_dirty_nobuffers(struct page *page);
1072 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
1073 
1074 #ifdef CONFIG_MIGRATION
1075 int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
1076 		struct folio *src, enum migrate_mode mode);
1077 #else
1078 #define filemap_migrate_folio NULL
1079 #endif
1080 void page_endio(struct page *page, bool is_write, int err);
1081 
1082 void folio_end_private_2(struct folio *folio);
1083 void folio_wait_private_2(struct folio *folio);
1084 int folio_wait_private_2_killable(struct folio *folio);
1085 
1086 /*
1087  * Add an arbitrary waiter to a page's wait queue
1088  */
1089 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
1090 
1091 /*
1092  * Fault in userspace address range.
1093  */
1094 size_t fault_in_writeable(char __user *uaddr, size_t size);
1095 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
1096 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
1097 size_t fault_in_readable(const char __user *uaddr, size_t size);
1098 
1099 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
1100 		pgoff_t index, gfp_t gfp);
1101 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1102 		pgoff_t index, gfp_t gfp);
1103 void filemap_remove_folio(struct folio *folio);
1104 void __filemap_remove_folio(struct folio *folio, void *shadow);
1105 void replace_page_cache_folio(struct folio *old, struct folio *new);
1106 void delete_from_page_cache_batch(struct address_space *mapping,
1107 				  struct folio_batch *fbatch);
1108 bool filemap_release_folio(struct folio *folio, gfp_t gfp);
1109 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
1110 		int whence);
1111 
1112 /* Must be non-static for BPF error injection */
1113 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
1114 		pgoff_t index, gfp_t gfp, void **shadowp);
1115 
1116 bool filemap_range_has_writeback(struct address_space *mapping,
1117 				 loff_t start_byte, loff_t end_byte);
1118 
1119 /**
1120  * filemap_range_needs_writeback - check if range potentially needs writeback
1121  * @mapping:           address space within which to check
1122  * @start_byte:        offset in bytes where the range starts
1123  * @end_byte:          offset in bytes where the range ends (inclusive)
1124  *
1125  * Check whether the supplied range contains at least one page; usually used
1126  * to tell if direct writing to this range will trigger writeback. Used by O_DIRECT
1127  * read/write with IOCB_NOWAIT, to see if the caller needs to do
1128  * filemap_write_and_wait_range() before proceeding.
1129  *
1130  * Return: %true if the caller should do filemap_write_and_wait_range() before
1131  * doing O_DIRECT to a page in this range, %false otherwise.
1132  */
1133 static inline bool filemap_range_needs_writeback(struct address_space *mapping,
1134 						 loff_t start_byte,
1135 						 loff_t end_byte)
1136 {
1137 	if (!mapping->nrpages)
1138 		return false;
1139 	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
1140 	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
1141 		return false;
1142 	return filemap_range_has_writeback(mapping, start_byte, end_byte);
1143 }
1144 
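/*
 * Usage sketch (editor's illustration, not part of this header): an
 * IOCB_NOWAIT direct-I/O path backing off instead of blocking on
 * writeback.  The function name and -EAGAIN policy are illustrative.
 */
static inline int my_fs_dio_nowait_check(struct address_space *mapping,
					 loff_t pos, size_t count)
{
	if (filemap_range_needs_writeback(mapping, pos, pos + count - 1))
		return -EAGAIN;
	return 0;
}
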
1145 /**
1146  * struct readahead_control - Describes a readahead request.
1147  *
1148  * A readahead request is for consecutive pages.  Filesystems which
1149  * implement the ->readahead method should call readahead_page() or
1150  * readahead_page_batch() in a loop and attempt to start I/O against
1151  * each page in the request.
1152  *
1153  * Most of the fields in this struct are private and should be accessed
1154  * by the functions below.
1155  *
1156  * @file: The file, used primarily by network filesystems for authentication.
1157  *	  May be NULL if invoked internally by the filesystem.
1158  * @mapping: Readahead this filesystem object.
1159  * @ra: File readahead state.  May be NULL.
1160  */
1161 struct readahead_control {
1162 	struct file *file;
1163 	struct address_space *mapping;
1164 	struct file_ra_state *ra;
1165 /* private: use the readahead_* accessors instead */
1166 	pgoff_t _index;
1167 	unsigned int _nr_pages;
1168 	unsigned int _batch_count;
1169 	bool _workingset;
1170 	unsigned long _pflags;
1171 };
1172 
1173 #define DEFINE_READAHEAD(ractl, f, r, m, i)				\
1174 	struct readahead_control ractl = {				\
1175 		.file = f,						\
1176 		.mapping = m,						\
1177 		.ra = r,						\
1178 		._index = i,						\
1179 	}
1180 
1181 #define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
1182 
1183 void page_cache_ra_unbounded(struct readahead_control *,
1184 		unsigned long nr_to_read, unsigned long lookahead_count);
1185 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
1186 void page_cache_async_ra(struct readahead_control *, struct folio *,
1187 		unsigned long req_count);
1188 void readahead_expand(struct readahead_control *ractl,
1189 		      loff_t new_start, size_t new_len);
1190 
1191 /**
1192  * page_cache_sync_readahead - generic file readahead
1193  * @mapping: address_space which holds the pagecache and I/O vectors
1194  * @ra: file_ra_state which holds the readahead state
1195  * @file: Used by the filesystem for authentication.
1196  * @index: Index of first page to be read.
1197  * @req_count: Total number of pages being read by the caller.
1198  *
1199  * page_cache_sync_readahead() should be called when a cache miss happened:
1200  * it will submit the read.  The readahead logic may decide to piggyback more
1201  * pages onto the read request if access patterns suggest it will improve
1202  * performance.
1203  */
1204 static inline
1205 void page_cache_sync_readahead(struct address_space *mapping,
1206 		struct file_ra_state *ra, struct file *file, pgoff_t index,
1207 		unsigned long req_count)
1208 {
1209 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1210 	page_cache_sync_ra(&ractl, req_count);
1211 }
1212 
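/*
 * Usage sketch (editor's illustration, not part of this header): a buffered
 * read path kicking off synchronous readahead on a cache miss and then
 * retrying the lookup.  Names and the retry policy are illustrative.
 */
static inline struct folio *my_fs_read_folio(struct file *file, pgoff_t index,
					     unsigned long nr_to_read)
{
	struct address_space *mapping = file->f_mapping;
	struct folio *folio;

	folio = filemap_get_folio(mapping, index);
	if (!folio) {
		page_cache_sync_readahead(mapping, &file->f_ra, file,
					  index, nr_to_read);
		folio = filemap_get_folio(mapping, index);
	}
	return folio;
}
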
1213 /**
1214  * page_cache_async_readahead - file readahead for marked pages
1215  * @mapping: address_space which holds the pagecache and I/O vectors
1216  * @ra: file_ra_state which holds the readahead state
1217  * @file: Used by the filesystem for authentication.
1218  * @folio: The folio at @index which triggered the readahead call.
1219  * @index: Index of first page to be read.
1220  * @req_count: Total number of pages being read by the caller.
1221  *
1222  * page_cache_async_readahead() should be called when a page is used which
1223  * is marked as PageReadahead; this is a marker to suggest that the application
1224  * has used up enough of the readahead window that we should start pulling in
1225  * more pages.
1226  */
1227 static inline
1228 void page_cache_async_readahead(struct address_space *mapping,
1229 		struct file_ra_state *ra, struct file *file,
1230 		struct folio *folio, pgoff_t index, unsigned long req_count)
1231 {
1232 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1233 	page_cache_async_ra(&ractl, folio, req_count);
1234 }
1235 
1236 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
1237 {
1238 	struct folio *folio;
1239 
1240 	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
1241 	ractl->_nr_pages -= ractl->_batch_count;
1242 	ractl->_index += ractl->_batch_count;
1243 
1244 	if (!ractl->_nr_pages) {
1245 		ractl->_batch_count = 0;
1246 		return NULL;
1247 	}
1248 
1249 	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
1250 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1251 	ractl->_batch_count = folio_nr_pages(folio);
1252 
1253 	return folio;
1254 }
1255 
1256 /**
1257  * readahead_page - Get the next page to read.
1258  * @ractl: The current readahead request.
1259  *
1260  * Context: The page is locked and has an elevated refcount.  The caller
1261  * should decrease the refcount once the page has been submitted for I/O
1262  * and unlock the page once all I/O to that page has completed.
1263  * Return: A pointer to the next page, or %NULL if we are done.
1264  */
1265 static inline struct page *readahead_page(struct readahead_control *ractl)
1266 {
1267 	struct folio *folio = __readahead_folio(ractl);
1268 
1269 	return &folio->page;
1270 }
1271 
1272 /**
1273  * readahead_folio - Get the next folio to read.
1274  * @ractl: The current readahead request.
1275  *
1276  * Context: The folio is locked.  The caller should unlock the folio once
1277  * all I/O to that folio has completed.
1278  * Return: A pointer to the next folio, or %NULL if we are done.
1279  */
1280 static inline struct folio *readahead_folio(struct readahead_control *ractl)
1281 {
1282 	struct folio *folio = __readahead_folio(ractl);
1283 
1284 	if (folio)
1285 		folio_put(folio);
1286 	return folio;
1287 }
1288 
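/*
 * Usage sketch (editor's illustration, not part of this header): the shape
 * of an ->readahead() implementation built on readahead_folio().  Each folio
 * comes back locked; a real filesystem would start asynchronous I/O for
 * folio_pos()/folio_size() and unlock on completion.  Name is hypothetical.
 */
static inline void my_fs_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		/* Pretend the read completed synchronously. */
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
}
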
1289 static inline unsigned int __readahead_batch(struct readahead_control *rac,
1290 		struct page **array, unsigned int array_sz)
1291 {
1292 	unsigned int i = 0;
1293 	XA_STATE(xas, &rac->mapping->i_pages, 0);
1294 	struct page *page;
1295 
1296 	BUG_ON(rac->_batch_count > rac->_nr_pages);
1297 	rac->_nr_pages -= rac->_batch_count;
1298 	rac->_index += rac->_batch_count;
1299 	rac->_batch_count = 0;
1300 
1301 	xas_set(&xas, rac->_index);
1302 	rcu_read_lock();
1303 	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
1304 		if (xas_retry(&xas, page))
1305 			continue;
1306 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1307 		VM_BUG_ON_PAGE(PageTail(page), page);
1308 		array[i++] = page;
1309 		rac->_batch_count += thp_nr_pages(page);
1310 		if (i == array_sz)
1311 			break;
1312 	}
1313 	rcu_read_unlock();
1314 
1315 	return i;
1316 }
1317 
1318 /**
1319  * readahead_page_batch - Get a batch of pages to read.
1320  * @rac: The current readahead request.
1321  * @array: An array of pointers to struct page.
1322  *
1323  * Context: The pages are locked and have an elevated refcount.  The caller
1324  * should decrease the refcount once each page has been submitted for I/O
1325  * and unlock the page once all I/O to that page has completed.
1326  * Return: The number of pages placed in the array.  0 indicates the request
1327  * is complete.
1328  */
1329 #define readahead_page_batch(rac, array)				\
1330 	__readahead_batch(rac, array, ARRAY_SIZE(array))
1331 
1332 /**
1333  * readahead_pos - The byte offset into the file of this readahead request.
1334  * @rac: The readahead request.
1335  */
1336 static inline loff_t readahead_pos(struct readahead_control *rac)
1337 {
1338 	return (loff_t)rac->_index * PAGE_SIZE;
1339 }
1340 
1341 /**
1342  * readahead_length - The number of bytes in this readahead request.
1343  * @rac: The readahead request.
1344  */
1345 static inline size_t readahead_length(struct readahead_control *rac)
1346 {
1347 	return rac->_nr_pages * PAGE_SIZE;
1348 }
1349 
1350 /**
1351  * readahead_index - The index of the first page in this readahead request.
1352  * @rac: The readahead request.
1353  */
1354 static inline pgoff_t readahead_index(struct readahead_control *rac)
1355 {
1356 	return rac->_index;
1357 }
1358 
1359 /**
1360  * readahead_count - The number of pages in this readahead request.
1361  * @rac: The readahead request.
1362  */
1363 static inline unsigned int readahead_count(struct readahead_control *rac)
1364 {
1365 	return rac->_nr_pages;
1366 }
1367 
1368 /**
1369  * readahead_batch_length - The number of bytes in the current batch.
1370  * @rac: The readahead request.
1371  */
1372 static inline size_t readahead_batch_length(struct readahead_control *rac)
1373 {
1374 	return rac->_batch_count * PAGE_SIZE;
1375 }
1376 
1377 static inline unsigned long dir_pages(struct inode *inode)
1378 {
1379 	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
1380 			       PAGE_SHIFT;
1381 }
1382 
1383 /**
1384  * folio_mkwrite_check_truncate - check if folio was truncated
1385  * @folio: the folio to check
1386  * @inode: the inode to check the folio against
1387  *
1388  * Return: the number of bytes in the folio up to EOF,
1389  * or -EFAULT if the folio was truncated.
1390  */
1391 static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
1392 					      struct inode *inode)
1393 {
1394 	loff_t size = i_size_read(inode);
1395 	pgoff_t index = size >> PAGE_SHIFT;
1396 	size_t offset = offset_in_folio(folio, size);
1397 
1398 	if (!folio->mapping)
1399 		return -EFAULT;
1400 
1401 	/* folio is wholly inside EOF */
1402 	if (folio_next_index(folio) - 1 < index)
1403 		return folio_size(folio);
1404 	/* folio is wholly past EOF */
1405 	if (folio->index > index || !offset)
1406 		return -EFAULT;
1407 	/* folio is partially inside EOF */
1408 	return offset;
1409 }
1410 
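/*
 * Usage sketch (editor's illustration, not part of this header): typical
 * use in a ->page_mkwrite() handler, where the folio is locked and must be
 * re-checked against truncation.  The function name is hypothetical.
 */
static inline vm_fault_t my_fs_folio_mkwrite(struct folio *folio,
					     struct inode *inode)
{
	ssize_t len = folio_mkwrite_check_truncate(folio, inode);

	if (len < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}
	/* ... mark the first "len" bytes dirty, keep the folio locked ... */
	return VM_FAULT_LOCKED;
}
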
1411 /**
1412  * page_mkwrite_check_truncate - check if page was truncated
1413  * @page: the page to check
1414  * @inode: the inode to check the page against
1415  *
1416  * Return: The number of bytes in the page up to EOF,
1417  * or -EFAULT if the page was truncated.
1418  */
1419 static inline int page_mkwrite_check_truncate(struct page *page,
1420 					      struct inode *inode)
1421 {
1422 	loff_t size = i_size_read(inode);
1423 	pgoff_t index = size >> PAGE_SHIFT;
1424 	int offset = offset_in_page(size);
1425 
1426 	if (page->mapping != inode->i_mapping)
1427 		return -EFAULT;
1428 
1429 	/* page is wholly inside EOF */
1430 	if (page->index < index)
1431 		return PAGE_SIZE;
1432 	/* page is wholly past EOF */
1433 	if (page->index > index || !offset)
1434 		return -EFAULT;
1435 	/* page is partially inside EOF */
1436 	return offset;
1437 }
1438 
1439 /**
1440  * i_blocks_per_folio - How many blocks fit in this folio.
1441  * @inode: The inode which contains the blocks.
1442  * @folio: The folio.
1443  *
1444  * If the block size is larger than the size of this folio, return zero.
1445  *
1446  * Context: The caller should hold a refcount on the folio to prevent it
1447  * from being split.
1448  * Return: The number of filesystem blocks covered by this folio.
1449  */
1450 static inline
1451 unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
1452 {
1453 	return folio_size(folio) >> inode->i_blkbits;
1454 }
1455 
1456 static inline
1457 unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
1458 {
1459 	return i_blocks_per_folio(inode, page_folio(page));
1460 }
1461 #endif /* _LINUX_PAGEMAP_H */
1462