/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else
 *   who does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does not, in general, hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage.  These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it is first entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
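
/*
 * Illustrative sketch (not part of this header): a typical read-side caller
 * first takes PG_locked, checks PG_uptodate, and only then trusts the data.
 * lock_page()/unlock_page() are the real helpers from <linux/pagemap.h> and
 * PageUptodate() is defined below; the surrounding function is hypothetical.
 *
 *	static int example_read_check(struct page *page)
 *	{
 *		lock_page(page);		// sets PG_locked, may sleep
 *		if (!PageUptodate(page)) {
 *			unlock_page(page);	// clears PG_locked, wakes waiters
 *			return -EIO;		// page data can't be trusted
 *		}
 *		unlock_page(page);
 *		return 0;
 *	}
 */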

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
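
/*
 * For illustration only: consumers of the fields area shift the high bits
 * back down.  This mirrors page_zonenum() from <linux/mm.h>; the shift and
 * mask constants come from the generated bounds headers and are not defined
 * here.
 *
 *	static inline enum zone_type example_page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 */
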
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable"  */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_error,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned to PAGE_SIZE within the struct page array
	 * can be fake head pages. The alignment check avoids accessing the
	 * fields (e.g. compound_head) of @page[1], which can spare touching
	 * a (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of @page[1] with PG_head
		 * because @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}
#define compound_head(page)	((typeof(page))_compound_head(page))
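
/*
 * Illustrative only: compound_head() folds any tail page back to its head,
 * so state that lives in the head page's flags can be tested from any
 * subpage.  The function below is hypothetical; PageDirty() later in this
 * header performs the same redirection internally via its page-flag policy.
 *
 *	static bool example_any_subpage_dirty(struct page *page)
 *	{
 *		struct page *head = compound_head(page);
 *
 *		return test_bit(PG_dirty, &head->flags);
 *	}
 */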

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
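
/*
 * A minimal sketch of the re-check pattern described above.  folio_try_get()
 * and folio_put() are the real helpers from <linux/mm.h>; the retry loop
 * itself is hypothetical (compare the GUP internals in mm/gup.c):
 *
 *	struct folio *folio;
 *
 *	for (;;) {
 *		folio = page_folio(page);
 *		if (!folio_try_get(folio))
 *			return NULL;		// folio was being freed
 *		if (likely(page_folio(page) == folio))
 *			break;			// still the same folio
 *		folio_put(folio);		// lost a race with a split
 *	}
 */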

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)		\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)		\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)				\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
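
/*
 * For illustration only: PAGEFLAG(Dirty, dirty, PF_HEAD) below expands
 * (roughly, ignoring the poison checks) to the functions sketched here.
 * The page variants redirect to the head page via the PF_HEAD policy; the
 * folio variants read the flags word selected by folio_flags():
 *
 *	bool folio_test_dirty(struct folio *folio)
 *	{ return test_bit(PG_dirty, folio_flags(folio, 0)); }
 *	int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 */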

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }		\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * page and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Unlike the flags above, this flag is used only in fsdax mode.  It
 * indicates that this page->mapping is now shared as a result of a
 * reflink operation.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)
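
/*
 * A minimal, illustrative decode of the mapping pointer (the helper name is
 * hypothetical; compare folio_anon_vma() in the mm/ internals): the low
 * PAGE_MAPPING_FLAGS bits are tag bits and must be masked off before the
 * pointer is dereferenced.
 *
 *	static struct anon_vma *example_folio_anon_vma(struct folio *folio)
 *	{
 *		unsigned long mapping = (unsigned long)folio->mapping;
 *
 *		if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 *			return NULL;	// file-backed, KSM or movable
 *		return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 *	}
 */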

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}
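
/*
 * A sketch of the barrier pairing described above (both functions are
 * hypothetical; folio_address() is the real helper from <linux/mm.h>).
 * The smp_wmb() in folio_mark_uptodate() orders the data stores before the
 * flag store; the smp_rmb() in folio_test_uptodate() orders the flag load
 * before the data loads:
 *
 *	// writer: fill the folio, then publish it
 *	static void example_fill(struct folio *folio, const char *src, size_t len)
 *	{
 *		memcpy(folio_address(folio), src, len);	// data stores ...
 *		folio_mark_uptodate(folio);	// ... then smp_wmb() + set_bit()
 *	}
 *
 *	// reader: only touch the data once the flag is observed
 *	static int example_read(struct folio *folio, char *dst, size_t len)
 *	{
 *		if (!folio_test_uptodate(folio))	// test_bit() + smp_rmb()
 *			return -EIO;
 *		memcpy(dst, folio_address(folio), len);	// loads ordered after
 *		return 0;
 *	}
 */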

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)	\
	__folio_start_writeback(folio, true)

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler and cleared when the THP is
 * split or the page is freed.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)		\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
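
/*
 * A worked example of the inverted sense (values are illustrative):
 *
 *	page->page_type == 0xffffffff	// initialised to -1: no type set
 *	__SetPageBuddy(page);	// page_type &= ~PG_buddy -> 0xffffff7f
 *	PageBuddy(page);	// (0xffffff7f & 0xf0000080) == 0xf0000000 -> true
 *	__ClearPageBuddy(page);	// page_type |= PG_buddy  -> 0xffffffff
 *
 * PageType() also fails once the field is reused as a mapcount, because the
 * top bits then no longer match PAGE_TYPE_BASE.
 */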

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require it to re-set the pages PageOffline() instead of giving
 * them to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active 	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond the page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */