xref: /linux-6.15/include/linux/page-flags.h (revision 1f330c32)
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing I/O on them.  The
 * struct page itself (which carries these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
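
#ifndef __GENERATING_BOUNDS_H
/*
 * Illustration only, not a kernel helper: a sketch of how the two halves
 * of page->flags relate.  The flags area occupies the low NR_PAGEFLAGS
 * bits; everything above it belongs to the packed zone/node/section
 * fields, which are read through the real helpers in <linux/mm.h>
 * (page_zonenum(), page_to_nid(), ...).  The function name below is
 * hypothetical.
 */
static inline unsigned long example_flags_area(const struct page *page)
{
	/* mask off the high "fields" area, keeping only the flag bits */
	return page->flags & ((1UL << NR_PAGEFLAGS) - 1);
}
#endif /* !__GENERATING_BOUNDS_H */
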
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_ANY(page, enforce)	page
#define PF_HEAD(page, enforce)	compound_head(page)
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page);})
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page;})

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static inline int Page##uname(struct page *page)			\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static inline void SetPage##uname(struct page *page)			\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static inline void ClearPage##uname(struct page *page)			\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static inline void __SetPage##uname(struct page *page)			\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static inline void __ClearPage##uname(struct page *page)		\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static inline int TestSetPage##uname(struct page *page)			\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static inline int TestClearPage##uname(struct page *page)		\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

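/*
 * For illustration only: a sketch of the code the generators above emit.
 * PAGEFLAG(Dirty, dirty, PF_HEAD), instantiated below, produces (among
 * others) the equivalent of this function.  The name is hypothetical;
 * the real PageDirty() comes from the macro expansion.
 */
static inline int example_expanded_PageDirty(struct page *page)
{
	/* PF_HEAD redirects the operation to the compound head page */
	return test_bit(PG_dirty, &compound_head(page)->flags);
}
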
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

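/*
 * Sketch, for illustration: PG_readahead above reuses the PG_reclaim bit,
 * which works because readahead only matters on the read path and reclaim
 * only on the write path.  A hypothetical read-side caller would consume
 * the marker like this (the real consumer is in mm/filemap.c).
 */
static inline int example_consume_readahead_marker(struct page *page)
{
	/* clears the shared bit, so only meaningful on the read side */
	return TestClearPageReadahead(page);
}
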
#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space,
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to the user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

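/*
 * Illustration only, not a kernel helper: recovering the untagged
 * mapping pointer by stripping the ANON/KSM tag bits.  The real
 * equivalents live elsewhere (e.g. page_rmapping() in mm/util.c in
 * kernels of this vintage); the name here is hypothetical.
 */
static inline void *example_untagged_mapping(struct page *page)
{
	page = compound_head(page);
	/* mask off the low tag bits to get the underlying pointer */
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}
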
#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

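/*
 * Sketch of the write-side ordering contract, for illustration only:
 * every store that makes the page contents valid must happen before
 * SetPageUptodate(), whose smp_wmb() pairs with the smp_rmb() in
 * PageUptodate() above.  fill_page_contents() is a hypothetical stand-in
 * for whatever populates the page.
 */
#if 0	/* example only; fill_page_contents() does not exist */
	fill_page_contents(page);	/* all stores to the page data... */
	SetPageUptodate(page);		/* ...are ordered before this set */
#endif
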
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

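/*
 * Illustration only: the test-and-set form reports whether writeback was
 * already set, which callers can use to avoid double accounting.  The
 * clear side is normally reached via end_page_writeback() (mm/filemap.c),
 * which calls test_clear_page_writeback() and wakes waiters.  The
 * function name below is hypothetical, and note that calling it also
 * sets the bit.
 */
static inline bool example_writeback_already_pending(struct page *page)
{
	/* nonzero return: the page was under writeback before this call */
	return test_set_page_writeback(page) != 0;
}
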
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

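/*
 * Sketch of the tail-page encoding round trip, for illustration only:
 * set_compound_head() stores the head pointer with bit 0 set, which is
 * exactly what PageTail() tests and compound_head() strips above.  The
 * function name is hypothetical.
 */
static inline bool example_tail_encoding_holds(struct page *tail,
					       struct page *head)
{
	set_compound_head(tail, head);
	/* bit 0 tags the pointer; compound_head() recovers it */
	return PageTail(tail) && compound_head(tail) == head;
}
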
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1L << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per-small-page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}
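
/*
 * Illustration only: PG_double_map is stored in the *first tail* page's
 * flags, which is why the helpers above index page[1].  A hypothetical
 * direct check on a known head page:
 */
static inline int example_thp_pte_mapped(struct page *head)
{
	/* equivalent to PageDoubleMap(head) when head is a head page */
	return test_bit(PG_double_map, &head[1].flags);
}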

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransTail)
TESTPAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

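/*
 * Sketch, for illustration only: a freed page has _mapcount == -1, so
 * values far below -1 are safe to overload as page-type markers without
 * being confused with a real (possibly underflowed) mapcount.  The
 * function name is hypothetical.
 */
static inline bool example_has_mapcount_marker(struct page *page)
{
	int mc = atomic_read(&page->_mapcount);

	/* matches either of the special markers defined above */
	return mc == PAGE_BUDDY_MAPCOUNT_VALUE ||
	       mc == PAGE_BALLOON_MAPCOUNT_VALUE;
}
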
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

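/*
 * Illustration only: on slab pages PG_active is repurposed as the
 * pfmemalloc marker, so the two meanings never coexist.  A hypothetical
 * safe wrapper that avoids the VM_BUG_ON on non-slab pages:
 */
static inline bool example_slab_from_reserves(struct page *page)
{
	/* check PageSlab first; PageSlabPfmemalloc() asserts it */
	return PageSlab(page) && PageSlabPfmemalloc(page);
}
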
#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1 << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru	 | 1 << PG_locked    | \
	 1 << PG_private | 1 << PG_private_2 | \
	 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
	 1 << PG_unevictable | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle, to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

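/*
 * Illustration only (hypothetical helper): the prep-time check masks the
 * whole flags area below NR_PAGEFLAGS, excluding only __PG_HWPOISON,
 * which intentionally survives the alloc-free cycle.
 */
static inline bool example_page_clean_at_prep(struct page *page)
{
	/* a freshly prepped page must not carry any of the checked bits */
	return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
}
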
#define PAGE_FLAGS_PRIVATE				\
	(1 << PG_private | 1 << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */