/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.
 * The generic code guarantees that this bit is cleared for a page when it is
 * first entered into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
        PG_writeback,           /* Page is under writeback */
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_head,                /* Must be in bit 6 */
        PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_active,
        PG_workingset,
        PG_error,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
        PG_arch_1,
        PG_reserved,
        PG_private,             /* If pagecache, has fs-private data */
        PG_private_2,           /* If pagecache, has fs aux data */
        PG_mappedtodisk,        /* Has blocks allocated on-disk */
        PG_reclaim,             /* To be reclaimed asap */
        PG_swapbacked,          /* Page is backed by RAM/swap */
        PG_unevictable,         /* Page is "unevictable" */
#ifdef CONFIG_MMU
        PG_mlocked,             /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        PG_uncached,            /* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
        PG_hwpoison,            /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
        PG_arch_2,
        PG_arch_3,
#endif
        __NR_PAGEFLAGS,

        PG_readahead = PG_reclaim,

        /*
         * Depending on the way an anonymous folio can be mapped into a page
         * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
         * THP), PG_anon_exclusive may be set only for the head page or for
         * tail pages of an anonymous folio. For now, we only expect it to be
         * set on tail pages for PTE-mapped THP.
         */
        PG_anon_exclusive = PG_mappedtodisk,

        /* Filesystems */
        PG_checked = PG_owner_priv_1,

        /* SwapBacked */
        PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */

        /*
         * Two page bits are conscripted by FS-Cache to maintain local caching
         * state. These bits are set on pages belonging to the netfs's inodes
         * when those inodes are being locally cached.
         */
        PG_fscache = PG_private_2,      /* page backed by cache */

        /* XEN */
        /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
        /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,
        /* Remapped by swiotlb-xen. */
        PG_xen_remapped = PG_owner_priv_1,

        /* non-lru isolated movable page */
        PG_isolated = PG_reclaim,

        /* Only valid for buddy pages. Used to track pages that are reported */
        PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
        /* For self-hosted memmap pages */
        PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

        /*
         * Flags only valid for compound pages. Stored in first tail page's
         * flags word. Cannot use the first 8 flags or any flag marked as
         * PF_ANY.
         */

        /* At least one page in this folio has the hwpoison flag set */
        PG_has_hwpoisoned = PG_error,
        PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK          ((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
        if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
                return page;

        /*
         * Only addresses aligned with PAGE_SIZE of struct page may be fake
         * head struct pages. The alignment check aims to avoid accessing the
         * fields (e.g. compound_head) of the tail page @page[1], which can
         * avoid touching a (possibly) cold cacheline in some cases.
         */
        if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
            test_bit(PG_head, &page->flags)) {
                /*
                 * We can safely access the fields of @page[1] when PG_head is
                 * set, because @page is then a compound page composed of at
                 * least two contiguous pages.
                 */
                unsigned long head = READ_ONCE(page[1].compound_head);

                if (likely(head & 1))
                        return (const struct page *)(head - 1);
        }
        return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
        return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
        return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
        unsigned long head = READ_ONCE(page->compound_head);

        if (unlikely(head & 1))
                return head - 1;
        return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)     ((typeof(page))_compound_head(page))

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)           (_Generic((p),                          \
        const struct page *:    (const struct folio *)_compound_head(p), \
        struct page *:          (struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)    nth_page(&(folio)->page, n)
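
/*
 * Usage sketch (illustrative only, not an additional API): a caller that
 * already holds a reference on a page can hop between the page and folio
 * views like this:
 *
 *      struct folio *folio = page_folio(page);
 *      struct page *first = folio_page(folio, 0);     // first == &folio->page
 *
 * For a tail page of a compound allocation, compound_head(page) returns the
 * head page, which is the same struct page that backs the folio.
 */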

static __always_inline int PageTail(const struct page *page)
{
        return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
        return test_bit(PG_head, &page->flags) ||
               READ_ONCE(page->compound_head) & 1;
}

#define PAGE_POISON_PATTERN     -1l
static inline int PagePoisoned(const struct page *page)
{
        return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
                unsigned n)
{
        const struct page *page = &folio->page;

        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
        return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
        struct page *page = &folio->page;

        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
        return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
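
/*
 * Example of what these policies mean in practice (illustrative only):
 * PAGEFLAG(Dirty, dirty, PF_HEAD) below makes SetPageDirty() on a tail page
 * of a compound page operate on the head page's flags word, while a flag
 * declared with PF_NO_COMPOUND (e.g. PG_checked) triggers
 * VM_BUG_ON_PGFLAGS() if it is modified on any compound page and
 * CONFIG_DEBUG_VM_PGFLAGS is enabled.
 */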
#define PF_POISONED_CHECK(page) ({                                      \
                VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
                page; })
#define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({                                    \
                VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
                PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({                                \
                VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
                PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({                                     \
                VM_BUG_ON_PGFLAGS(!PageHead(page), page);               \
                PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY            0
#define FOLIO_PF_HEAD           0
#define FOLIO_PF_NO_TAIL        0
#define FOLIO_PF_NO_COMPOUND    0
#define FOLIO_PF_SECOND         1

#define FOLIO_HEAD_PAGE         0
#define FOLIO_SECOND_PAGE       1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)                                     \
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)                                      \
static __always_inline void folio_set_##name(struct folio *folio)      \
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)                                    \
static __always_inline void folio_clear_##name(struct folio *folio)    \
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)                                    \
static __always_inline void __folio_set_##name(struct folio *folio)    \
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)                                  \
static __always_inline void __folio_clear_##name(struct folio *folio)  \
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)                                 \
static __always_inline bool folio_test_set_##name(struct folio *folio) \
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)                               \
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)                                          \
FOLIO_TEST_FLAG(name, page)                                             \
FOLIO_SET_FLAG(name, page)                                              \
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)                              \
FOLIO_TEST_FLAG(lname, FOLIO_##policy)                                  \
static __always_inline int Page##uname(const struct page *page)        \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)                               \
FOLIO_SET_FLAG(lname, FOLIO_##policy)                                   \
static __always_inline void SetPage##uname(struct page *page)          \
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)                             \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)                                 \
static __always_inline void ClearPage##uname(struct page *page)        \
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)                             \
__FOLIO_SET_FLAG(lname, FOLIO_##policy)                                 \
static __always_inline void __SetPage##uname(struct page *page)        \
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)                           \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)                               \
static __always_inline void __ClearPage##uname(struct page *page)      \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
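
/*
 * Illustrative expansion (nothing is defined here): FOLIO_FLAG(waiters,
 * FOLIO_HEAD_PAGE), used further down, emits folio_test_waiters(),
 * folio_set_waiters() and folio_clear_waiters(), all operating on the head
 * page's flags word. The double-underscore generators emit the non-atomic
 * __set_bit()/__clear_bit() variants, which are only safe while the caller
 * is the sole user of the page, e.g. while a newly allocated page is being
 * set up.
 */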

#define TESTSETFLAG(uname, lname, policy)                               \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)                              \
static __always_inline int TestSetPage##uname(struct page *page)       \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)                             \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)                            \
static __always_inline int TestClearPage##uname(struct page *page)     \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)                                  \
        TESTPAGEFLAG(uname, lname, policy)                              \
        SETPAGEFLAG(uname, lname, policy)                               \
        CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)                                \
        TESTPAGEFLAG(uname, lname, policy)                              \
        __SETPAGEFLAG(uname, lname, policy)                             \
        __CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)                                \
        TESTSETFLAG(uname, lname, policy)                               \
        TESTCLEARFLAG(uname, lname, policy)

#define FOLIO_TEST_FLAG_FALSE(name)                                     \
static inline bool folio_test_##name(const struct folio *folio)        \
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)                                       \
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)                                     \
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)                                     \
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)                                   \
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)                                 \
static inline bool folio_test_set_##name(struct folio *folio)          \
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)                               \
static inline bool folio_test_clear_##name(struct folio *folio)        \
{ return false; }

#define FOLIO_FLAG_FALSE(name)                                          \
FOLIO_TEST_FLAG_FALSE(name)                                             \
FOLIO_SET_FLAG_NOOP(name)                                               \
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)                                \
FOLIO_TEST_FLAG_FALSE(lname)                                            \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)                                  \
FOLIO_SET_FLAG_NOOP(lname)                                              \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)                                \
FOLIO_CLEAR_FLAG_NOOP(lname)                                            \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)                              \
__FOLIO_CLEAR_FLAG_NOOP(lname)                                          \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)                                 \
FOLIO_TEST_SET_FLAG_FALSE(lname)                                        \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)                               \
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)                                      \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)   \
        SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)                                  \
        TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
        __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
        TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
        TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
        TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)      /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
        TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
        TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback. The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
        TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
        TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
        TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
        return folio_test_swapbacked(folio) &&
                        test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(const struct page *page)
{
        return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
        __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
        TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
        TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON  0x48575053U     /* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; page->mapping then points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal state, the page->mapping does not exist as such, nor do these
 * flags below. So in order to avoid testing non-existent bits, please
 * make sure that PageSlab(page) actually evaluates to false before calling
 * the following functions (e.g., PageAnon). See mm/slab.h.
 */
#define PAGE_MAPPING_ANON       0x1
#define PAGE_MAPPING_MOVABLE    0x2
#define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Different from the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is now shared due to reflink.
 */
#define PAGE_MAPPING_DAX_SHARED ((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
        return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
        return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif
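
/*
 * Worked example (illustrative): for an ordinary anonymous folio,
 * folio->mapping holds the anon_vma pointer with PAGE_MAPPING_ANON set in
 * the low bits, so folio_test_anon() is true and folio_test_ksm() is false.
 * For a KSM folio both PAGE_MAPPING_ANON and PAGE_MAPPING_MOVABLE are set,
 * so folio_test_anon() and folio_test_ksm() are both true. A non-LRU movable
 * page sets only PAGE_MAPPING_MOVABLE, which makes __folio_test_movable()
 * true while folio_test_anon() stays false.
 */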

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held. For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held. It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
                unsigned long mask)
{
        return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
        bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
        /*
         * Must ensure that the data we read out of the folio is loaded
         * _after_ we've loaded folio->flags to check the uptodate bit.
         * We can skip the barrier if the folio is not uptodate, because
         * we wouldn't be reading anything from it.
         *
         * See folio_mark_uptodate() for the other side of the story.
         */
        if (ret)
                smp_rmb();

        return ret;
}

static inline int PageUptodate(const struct page *page)
{
        return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
        smp_wmb();
        __set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
        /*
         * Memory barrier must be issued before setting the PG_uptodate bit,
         * so that all previous stores issued in order to bring the folio
         * uptodate are actually visible before folio_test_uptodate becomes true.
         */
        smp_wmb();
        set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
        __folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
        folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
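
/*
 * Barrier pairing sketch (illustrative): a filesystem that fills a folio
 * with data and then calls folio_mark_uptodate() has its data stores ordered
 * before the PG_uptodate store by the smp_wmb() above; a reader that sees
 * folio_test_uptodate() return true executes the matching smp_rmb() and can
 * therefore safely read the data out of the folio.
 */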

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)                    \
        __folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)          \
        __folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
        return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
        PF_POISONED_CHECK(page);
        return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
        return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
        WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
        WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
        return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
        return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler and cleared by THP split or when
 * the page is freed.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
        TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
        TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that are never mapped to userspace,
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of _mapcount won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE  0xf0000000
/* Reserve      0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE   -128
#define PG_buddy        0x00000080
#define PG_offline      0x00000100
#define PG_table        0x00000200
#define PG_guard        0x00000400
#define PG_hugetlb      0x00000800
#define PG_slab         0x00001000

#define PageType(page, flag)                                            \
        ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)                                    \
        ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
        return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(const struct page *page)
{
        return page_type_has_type(page->page_type);
}

#define FOLIO_TYPE_OPS(lname, fname)                                    \
static __always_inline bool folio_test_##fname(const struct folio *folio)\
{                                                                       \
        return folio_test_type(folio, PG_##lname);                      \
}                                                                       \
static __always_inline void __folio_set_##fname(struct folio *folio)   \
{                                                                       \
        VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);             \
        folio->page.page_type &= ~PG_##lname;                           \
}                                                                       \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{                                                                       \
        VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);             \
        folio->page.page_type |= PG_##lname;                            \
}

#define PAGE_TYPE_OPS(uname, lname, fname)                              \
FOLIO_TYPE_OPS(lname, fname)                                            \
static __always_inline int Page##uname(const struct page *page)        \
{                                                                       \
        return PageType(page, PG_##lname);                              \
}                                                                       \
static __always_inline void __SetPage##uname(struct page *page)        \
{                                                                       \
        VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
        page->page_type &= ~PG_##lname;                                 \
}                                                                       \
static __always_inline void __ClearPage##uname(struct page *page)      \
{                                                                       \
        VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
        page->page_type |= PG_##lname;                                  \
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)
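
/*
 * Worked example of the inverted encoding (illustrative): a freshly
 * initialised page has page_type == 0xffffffff. __SetPageBuddy() clears
 * PG_buddy, leaving 0xffffff7f, so PageBuddy() sees
 * (0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE and returns
 * true; __ClearPageBuddy() sets the bit again and restores 0xffffffff.
 */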

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined. A
 * driver that relies on this feature is aware that re-onlining the memory
 * block will require the pages to be set PageOffline() again and not be
 * given to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
        return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
        return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
        const struct folio *folio;

        if (PageHWPoison(page))
                return true;
        folio = page_folio(page);
        return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED            (1UL << PG_mlocked)
#else
#define __PG_MLOCKED            0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE                                \
        (1UL << PG_lru          | 1UL << PG_locked      |       \
         1UL << PG_private      | 1UL << PG_private_2   |       \
         1UL << PG_writeback    | 1UL << PG_reserved    |       \
         1UL << PG_active       |                               \
         1UL << PG_unevictable  | __PG_MLOCKED | LRU_GEN_MASK)
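
/*
 * Illustrative use (a sketch, not a definition made here): the free-time
 * sanity checks in mm/page_alloc.c test page->flags against
 * PAGE_FLAGS_CHECK_AT_FREE and report a "bad page" if any of these bits is
 * still set when a page is handed back to the buddy allocator.
 */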

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP        \
        ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND                                               \
        (0xffUL /* order */             | 1UL << PG_has_hwpoisoned |    \
         1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE                              \
        (1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(const struct page *page)
{
        return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(const struct folio *folio)
{
        return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif  /* PAGE_FLAGS_H */