/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping of pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved in general does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain
 * filesystem-specific data (which is normally at page->private). It can be
 * used by private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage.  These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
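
/*
 * A rough sketch of the read-side lifecycle described above (lock_page()
 * and unlock_page() live in <linux/pagemap.h>; shown here only for
 * orientation, not as a template):
 *
 *	lock_page(page);		// PG_locked set, page pinned
 *	// ... submit read I/O ...
 *	// the I/O completion path then does, on success:
 *	//	SetPageUptodate(page);
 *	//	unlock_page(page);	// PG_locked cleared, waiters woken
 */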

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
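
/*
 * Extraction sketch: each field in the high-bits area is read by shifting
 * and masking, e.g. page_zonenum() in <linux/mm.h> is built on this layout
 * (the shift/mask constants are generated from the configuration; shown
 * here for orientation only):
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 */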
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#ifndef __GENERATING_BOUNDS_H

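/*
 * page->compound_head encodes "is this a tail page?" in bit 0: for tail
 * pages it holds the address of the head page with the low bit set
 * (struct page is at least word-aligned, so bit 0 of a valid pointer is
 * free); see set_compound_head() further down.
 */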
static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page;
}

#define compound_head(page)	((typeof(page))_compound_head(page))

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
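
/*
 * Expansion sketch (not generated code, just what the macros above
 * produce): PAGEFLAG(Dirty, dirty, PF_HEAD) yields
 *
 *	static __always_inline int PageDirty(struct page *page)
 *		{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * where PF_HEAD() resolves the head page and poison-checks it first.
 */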

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
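/*
 * PG_swapcache aliases PG_owner_priv_1 (see the enum above), so testing
 * the bit alone would be ambiguous: a page is only considered to be in
 * the swap cache when it is PG_swapbacked as well, as checked below.
 */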
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to the user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
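
/*
 * Decoding sketch for the low bits of page->mapping, per the comment
 * above (PAGE_MAPPING_ prefixes dropped for brevity; the real pointer is
 * recovered by masking PAGE_MAPPING_FLAGS off):
 *
 *	mapping & FLAGS == 0		-> struct address_space (pagecache)
 *	mapping & FLAGS == ANON		-> struct anon_vma
 *	mapping & FLAGS == ANON|MOVABLE	-> KSM stable-tree node
 *	mapping & FLAGS == MOVABLE	-> non-lru movable page
 */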

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;

	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}
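
/*
 * Barrier pairing sketch: the smp_wmb() in SetPageUptodate() pairs with
 * the smp_rmb() in PageUptodate():
 *
 *	writer					reader
 *	------					------
 *	write page contents			if (PageUptodate(page))
 *	smp_wmb()				//   smp_rmb() taken when set
 *	set_bit(PG_uptodate, ...)		read page contents
 */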

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() returns true only for hugetlbfs pages, not for normal or
 * transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and hugetlbfs
 * pages, but not normal pages. PageTransHuge() can only be called in the
 * core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, e.g. when protected by the
 * MMU notifier, otherwise it may result in page->_mapcount check false
 * positives.
 *
 * We have to treat page cache THP differently since every subpage of it
 * would get _mapcount inc'ed once it is PMD mapped.  But it may be PTE
 * mapped in the current process, so we compare the subpage's _mapcount
 * with compound_mapcount to filter out the PTE-mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	struct page *head;

	if (!PageTransCompound(page))
		return 0;

	if (PageAnon(page))
		return atomic_read(&page->_mapcount) < 0;

	head = compound_head(page);
	/* File THP is PMD mapped and not PTE mapped */
	return atomic_read(&page->_mapcount) ==
	       atomic_read(compound_mapcount_ptr(head));
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
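
/*
 * Worked example of the inverted sense described above (a sketch, using
 * PAGE_TYPE_OPS(Buddy, buddy) from below): page_type is initialised to
 * -1 (0xffffffff), meaning "no type set":
 *
 *	0xffffffff			-> PageBuddy() == false
 *	__SetPageBuddy(): &= ~PG_buddy	-> 0xffffff7f, PageBuddy() == true
 *	__ClearPageBuddy(): |= PG_buddy	-> 0xffffffff, PageBuddy() == false
 *
 * The low 0x7f values are reserved so that a page_mapcount() underflow
 * (down to PAGE_MAPCOUNT_RESERVE) still has low bits set and cannot
 * produce a valid type pattern.
 */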

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require re-setting the pages PageOffline() instead of giving
 * them to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}
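
/*
 * A note on the aliasing above: slab pages are never on the LRU, so
 * PG_active is otherwise unused for them and is free to carry the
 * pfmemalloc marker; the VM_BUG_ON_PAGE(!PageSlab(page)) checks keep
 * the two uses apart.
 */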

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond the page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */