/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage.  These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
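
/*
 * Illustrative sketch (not part of the original header): a typical read
 * completion handler flips these bits in the order described above -- mark
 * the page uptodate (or record the error) and only then drop PG_locked so
 * waiters observe a consistent page.  unlock_page() is declared in
 * <linux/pagemap.h>; the handler name below is hypothetical.
 *
 *	static void my_read_end_io(struct page *page, int err)
 *	{
 *		if (!err)
 *			SetPageUptodate(page);
 *		else
 *			SetPageError(page);
 *		unlock_page(page);
 *	}
 */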

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}
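
/*
 * Illustrative sketch of the tail-page encoding used above (not in the
 * original source): a tail page's ->compound_head field holds the head
 * page's address plus 1, i.e. with bit 0 set (a struct page pointer is
 * always at least word aligned, so bit 0 is free).  The setter,
 * set_compound_head(), is defined further down in this file:
 *
 *	set_compound_head(tail, head);	// stores (unsigned long)head + 1
 *	PageTail(tail);			// reads bit 0 -> true
 *	compound_head(tail);		// subtracts 1 -> head
 *	compound_head(head);		// bit 0 clear -> head itself
 */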

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     checks whether this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)		\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)		\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
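
/*
 * Illustrative expansion (not in the original source):
 * PAGEFLAG(Dirty, dirty, PF_HEAD) generates three helpers that all redirect
 * to the head page first, roughly:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * (The PF_HEAD policy additionally wraps the page in PF_POISONED_CHECK();
 * that check is elided above for brevity.)
 */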

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif
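
/*
 * Aside (illustrative, not in the original source): PG_swapcache aliases
 * PG_owner_priv_1, which filesystems also use as PG_checked.  That is why
 * PageSwapCache() above additionally requires PageSwapBacked() -- on a
 * !swapbacked pagecache page the same bit may legitimately be set by the
 * filesystem:
 *
 *	SetPageChecked(page);	// fs sets PG_owner_priv_1
 *	PageSwapCache(page);	// still false: !PageSwapBacked(page)
 */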

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
static inline bool set_hwpoison_free_buddy_page(struct page *page)
{
	return false;
}
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page; page->mapping then points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}
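
/*
 * Illustrative decode table for the two low bits of page->mapping (not in
 * the original source):
 *
 *	00  ->  struct address_space * (pagecache) or NULL
 *	01  ->  anon_vma pointer (PageAnon)
 *	10  ->  non-lru movable page (__PageMovable)
 *	11  ->  KSM stable-tree node (PageKsm)
 *
 * Recovering the real pointer just means masking the flag bits, e.g.:
 *
 *	struct anon_vma *av = (struct anon_vma *)
 *		((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 */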

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
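
/*
 * Illustrative barrier pairing for the Uptodate helpers above (hypothetical
 * reader/writer, not in the original source).  The smp_wmb() in
 * SetPageUptodate() pairs with the smp_rmb() in PageUptodate():
 *
 *	// writer				// reader
 *	memcpy(page_address(page), src, len);	if (PageUptodate(page))
 *	SetPageUptodate(page);			    use(page_address(page));
 *
 * Without the pair, the reader could observe PG_uptodate set but still see
 * stale page contents on a weakly ordered machine.
 */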

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}
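
/*
 * Illustrative lifecycle (not in the original source): a writeback path
 * brackets the I/O with these helpers so the dirty/writeback accounting
 * stays balanced.  end_page_writeback() is declared in <linux/pagemap.h>
 * and uses test_clear_page_writeback() internally; the submission helper
 * below is hypothetical.
 *
 *	set_page_writeback(page);	// before submitting the write
 *	submit_my_write_bio(page);
 *	...
 *	end_page_writeback(page);	// from the I/O completion handler
 */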

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can be
 * called only in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount check false
 * positives.
 *
 * We have to treat page cache THP differently since every subpage of it
 * would get _mapcount inc'ed once it is PMD mapped.  But it may be PTE
 * mapped in the current process, so we compare the subpage's _mapcount
 * with the compound_mapcount to filter out the PTE mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	struct page *head;

	if (!PageTransCompound(page))
		return 0;

	if (PageAnon(page))
		return atomic_read(&page->_mapcount) < 0;

	head = compound_head(page);
	/* File THP is PMD mapped and not PTE mapped */
	return atomic_read(&page->_mapcount) ==
	       atomic_read(compound_mapcount_ptr(head));
}
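
/*
 * Aside on the anon case above (illustrative): _mapcount starts at -1 and
 * only PTE mappings of the individual subpage increment it (PMD mappings
 * are counted in compound_mapcount instead).  So for an anon THP,
 * atomic_read(&page->_mapcount) < 0 means "this subpage has no PTE
 * mapping", i.e. the page is mapped solely by the PMD.
 */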

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can postpone
 * per small page mapcount accounting (and its overhead from atomic operations)
 * until the first PMD split.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400
#define PG_guard	0x00000800

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)		\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
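
/*
 * Worked example (illustrative, not in the original source): page_type
 * starts out as -1 (0xffffffff).  __SetPageBuddy() clears PG_buddy:
 * 0xffffffff & ~0x80 == 0xffffff7f.  PageBuddy() then checks
 * (0xffffff7f & (0xf0000000 | 0x80)) == 0xf0000000, which holds.  A word
 * in use as a real _mapcount counts up from -1, so realistic values never
 * combine the 0xf0000000 base bits with a clear type bit, and the reserved
 * low bits catch small underflows before they could look like a type.
 */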

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require the pages to be set PageOffline() again, without
 * handing them to the buddy via online_page_callback_t.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}
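
/*
 * Aside (illustrative, not in the original source): reusing PG_active here
 * is safe because slab pages are never put on the LRU, so the bit is
 * otherwise unused for them.  A hypothetical allocation-side caller might
 * look like (page_is_pfmemalloc() is declared in <linux/mm.h>):
 *
 *	page = alloc_pages(gfp, order);
 *	__SetPageSlab(page);
 *	if (page_is_pfmemalloc(page))
 *		SetPageSlabPfmemalloc(page);
 */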

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
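
/*
 * Illustrative use (not in the original source): reclaim and truncation
 * check this before dropping a pagecache page, so the owning filesystem
 * gets a chance to release its private data first, roughly:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		return;		// fs refused; keep the page for now
 *
 * (try_to_release_page() is declared in <linux/pagemap.h>.)
 */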

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */