/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
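
/*
 * Illustrative sketch (the "myfs" function is hypothetical): a typical
 * writeback completion path records any I/O error before ending writeback,
 * so a later fsync(2) can report it.  mapping_set_error() is a no-op for
 * error == 0, so it can be called unconditionally:
 *
 *	static void myfs_end_page_writeback(struct page *page, int error)
 *	{
 *		mapping_set_error(page->mapping, error);
 *		end_page_writeback(page);
 *	}
 */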

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
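
/*
 * Illustrative sketch: a caller that would like a GFP_KERNEL allocation,
 * but must not exceed what the mapping permits (e.g. a mapping whose mask
 * clears __GFP_FS), combines the two masks:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *	struct page *page = __page_cache_alloc(gfp);
 *
 * (__page_cache_alloc() is declared later in this header.)
 */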

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
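
/*
 * Illustrative sketch (simplified from what find_get_page() actually does)
 * of the three-step lookup pattern described above:
 *
 *	repeat:
 *		rcu_read_lock();
 *		page = xa_load(&mapping->i_pages, index);        (step 1)
 *		if (page && !page_cache_get_speculative(page)) {
 *			rcu_read_unlock();
 *			goto repeat;                             (step 2 failed)
 *		}
 *		if (page && page != xa_load(&mapping->i_pages, index)) {
 *			put_page(page);                          (step 3 failed)
 *			rcu_read_unlock();
 *			goto repeat;
 *		}
 *		rcu_read_unlock();
 */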

/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);

	return data;
}
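
/*
 * Illustrative sketch ("struct myfs_page_info" is hypothetical): a
 * filesystem hanging its own bookkeeping off a locked pagecache page,
 * and tearing it down again later:
 *
 *	struct myfs_page_info *info = kzalloc(sizeof(*info), GFP_NOFS);
 *
 *	lock_page(page);
 *	attach_page_private(page, info);
 *	unlock_page(page);
 *
 *	(... later, e.g. from ->releasepage or ->invalidatepage ...)
 *
 *	info = detach_page_private(page);
 *	kfree(info);
 */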

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (hpage_nr_pages(head) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get the index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 *  We don't initialize ->index for tail pages: calculate based on
	 *  head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
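
/*
 * Illustrative sketch: a fault path mapping the faulting address back to
 * the index of the backing page in the file, then looking that page up:
 *
 *	pgoff_t pgoff = linear_page_index(vmf->vma, vmf->address);
 *	struct page *page = find_get_page(vmf->vma->vm_file->f_mapping, pgoff);
 */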

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return likely(!test_and_set_bit_lock(PG_locked, &page->flags));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
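
/*
 * Illustrative sketch: a read path that takes the page lock but remains
 * killable, so a task stuck waiting on a page can still be killed:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		put_page(page);
 *		return error;        (-EINTR: fatal signal while waiting)
 *	}
 *	(... operate on the locked page ...)
 *	unlock_page(page);
 */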

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
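
/*
 * Illustrative sketch (loosely modelled on generic_perform_write(), with
 * hypothetical locals): pre-fault the source buffer, then do a no-fault
 * copy under the page lock, retrying if the copy still comes up short.
 * kmap_atomic() disables page faults, which is what makes the inatomic
 * copy safe to attempt while the page lock is held:
 *
 *	again:
 *		if (unlikely(fault_in_pages_readable(ubuf, bytes)))
 *			return -EFAULT;
 *		lock_page(page);
 *		kaddr = kmap_atomic(page);
 *		left = __copy_from_user_inatomic(kaddr + offset, ubuf, bytes);
 *		kunmap_atomic(kaddr);
 *		unlock_page(page);
 *		if (left)
 *			goto again;
 */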

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
		struct file *, pgoff_t index, unsigned long req_count);
void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
		struct file *, struct page *, pgoff_t index,
		unsigned long req_count);
void page_cache_readahead_unbounded(struct address_space *, struct file *,
		pgoff_t index, unsigned long nr_to_read,
		unsigned long lookahead_count);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = hpage_nr_pages(page);

	return page;
}
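
/*
 * Illustrative sketch ("myfs" names are hypothetical): the loop a
 * filesystem's ->readahead implementation is expected to run, as described
 * for struct readahead_control above:
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac)))
 *			myfs_start_read(rac->file, page);
 *	}
 *
 * myfs_start_read() stands in for whatever submits the I/O; per the context
 * rules above, its completion path must drop the elevated refcount and
 * unlock the page.
 */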

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += hpage_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index.  This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount on each page once it has been submitted for
 * I/O, and unlock each page once all I/O to it has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
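
/*
 * Illustrative sketch: draining the request in batches using an on-stack
 * array (readahead_page_batch() sizes the batch via ARRAY_SIZE());
 * myfs_start_read() is hypothetical, as above:
 *
 *	struct page *pages[16];
 *	unsigned int i, nr;
 *
 *	while ((nr = readahead_page_batch(rac, pages)))
 *		for (i = 0; i < nr; i++)
 *			myfs_start_read(rac->file, pages[i]);
 */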

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
	return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
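
/*
 * Illustrative sketch ("myfs" is hypothetical): a ->page_mkwrite handler
 * locking the page and bailing out if it raced with truncation:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *
 *		lock_page(page);
 *		if (page_mkwrite_check_truncate(page, inode) < 0) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		set_page_dirty(page);
 *		wait_for_stable_page(page);
 *		return VM_FAULT_LOCKED;
 *	}
 */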

#endif /* _LINUX_PAGEMAP_H */