/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else
 *   who does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};
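
/*
 * Editorial note (not part of the original header): the aliases above reuse
 * one physical bit among mutually exclusive owners.  PG_owner_priv_1, for
 * example, serves as PG_checked on filesystem pages, PG_swapcache on
 * swap-backed pages and PG_pinned/PG_foreign on Xen pages; which reading
 * applies depends entirely on who currently owns the page.
 */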

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}
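
/*
 * Editorial illustration (hedged, not from the original header): the low bit
 * of page->compound_head is a "this is a tail page" tag, so one READ_ONCE()
 * load both detects tail pages and recovers the head.  set_compound_head()
 * further down stores (unsigned long)head + 1 (struct page is aligned, so
 * bit 0 of head is always clear), and compound_head() strips the tag again:
 *
 *	set_compound_head(tail, head);	// tail->compound_head = head | 1
 *	compound_head(tail) == head;	// bit 0 set: return (head + 1) - 1
 *	compound_head(head) == head;	// bit 0 clear: identity
 */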

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
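
/*
 * Editorial sketch of how the policies behave on a tail page (assumes
 * CONFIG_DEBUG_VM_PGFLAGS so the VM_BUG_ON_PGFLAGS() checks are compiled in;
 * "enforce" is 1 for modifying operations, 0 for tests):
 *
 *	PF_HEAD(tail, 0)	-> compound_head(tail), i.e. the head page
 *	PF_NO_TAIL(tail, 0)	-> compound_head(tail)	(checks allowed)
 *	PF_NO_TAIL(tail, 1)	-> BUG: modification attempted on a tail page
 *	PF_NO_COMPOUND(head, 1)	-> BUG: flag not meant for compound pages
 */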

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
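
/*
 * Editorial example (derived mechanically from the macros above): the single
 * line "PAGEFLAG(Dirty, dirty, PF_HEAD)" below expands to
 *
 *	static __always_inline int PageDirty(struct page *page)
 *		{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * so every flag gets Page*/SetPage*/ClearPage* accessors with the
 * compound-page policy baked in.
 */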

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
static inline bool set_hwpoison_free_buddy_page(struct page *page)
{
	return false;
}
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON (i.e. only PAGE_MAPPING_MOVABLE
 * set) is used for non-lru movable pages, and then page->mapping points to a
 * struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to the user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
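
/*
 * Editorial summary of the four tag states encoded in the low two bits of
 * page->mapping (restating the comment above):
 *
 *	mapping & PAGE_MAPPING_FLAGS == 0
 *		ordinary file page; mapping is a struct address_space
 *	mapping & PAGE_MAPPING_FLAGS == PAGE_MAPPING_ANON
 *		anonymous page; mapping is an anon_vma
 *	mapping & PAGE_MAPPING_FLAGS == PAGE_MAPPING_MOVABLE
 *		non-lru movable page; mapping is a struct address_space
 *	mapping & PAGE_MAPPING_FLAGS == PAGE_MAPPING_KSM
 *		KSM page; mapping is KSM's private stable-tree structure
 */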

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;

	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}
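
/*
 * Editorial sketch of the barrier pairing (a hypothetical read-completion
 * path; the function names are illustrative only, not from this header):
 *
 *	writer (I/O completion)		reader
 *	-----------------------	------
 *	copy_data_into(page);
 *	SetPageUptodate(page);		if (PageUptodate(page))
 *	  // smp_wmb() before set	  // smp_rmb() after test
 *					  use_data_from(page);
 *
 * The wmb/rmb pair guarantees the reader never observes PG_uptodate set
 * while the data stores that made the page uptodate are still invisible.
 */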

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can be
 * called only in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to call only while
 * split_huge_pmd() cannot run from under us, e.g. if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per-small-page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page, PageDoubleMap means that ->_mapcount in all sub-pages is
 * offset up by one. This reference will go away with the last
 * compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
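
/*
 * Editorial worked example of the inverted encoding, using the values
 * defined above.  page_type is initialised to -1 (0xffffffff), so:
 *
 *	fresh page:   PageBuddy() == false
 *		(0xffffffff & 0xf0000080) == 0xf0000080 != PAGE_TYPE_BASE
 *	__SetPageBuddy():   page_type &= ~0x80   -> 0xffffff7f
 *		(0xffffff7f & 0xf0000080) == 0xf0000000 -> PageBuddy() true
 *	__ClearPageBuddy(): page_type |= 0x80    -> 0xffffffff again
 *
 * Because a valid type requires the 0xf0000000 nibble to survive intact,
 * small over/underflows of page_mapcount() cannot masquerade as a page type.
 */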

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */