/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it is first entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state. These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages. Stored in first tail page's
	 * flags word. Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_active,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page,
 * otherwise return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1]; it can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
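
/*
 * Illustrative sketch, not part of the API: converting between pages and
 * folios. folio_nr_pages() is assumed to be available from linux/mm.h.
 *
 *	struct folio *folio = page_folio(page);
 *	struct page *first = folio_page(folio, 0);
 *	struct page *last = folio_page(folio, folio_nr_pages(folio) - 1);
 *
 * The round trip is only stable while a reference is held, since a
 * concurrent folio split could otherwise move @page to another folio.
 */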

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *	check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *	the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *	for compound pages, all operations related to the page flag are
 *	applied to the head page.
 *
 * PF_NO_TAIL:
 *	modifications of the page flag must be done on small or head pages,
 *	checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *	the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *	the page flag is stored in the first tail page.
 */
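
/*
 * Illustrative sketch of what the policies mean in practice, assuming a
 * large folio so that a tail page exists. For a PF_HEAD flag such as
 * PG_dirty, operations on a tail page are transparently redirected to the
 * head page:
 *
 *	struct page *tail = folio_page(folio, 1);
 *
 *	SetPageDirty(tail);	sets PG_dirty on the head page
 *	PageDirty(tail);	tests PG_dirty on the head page
 *
 * A PF_NO_COMPOUND modification on a compound page would instead trip
 * VM_BUG_ON_PGFLAGS() when CONFIG_DEBUG_VM_PGFLAGS is enabled.
 */
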
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
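
/*
 * Illustrative sketch of the generated API: an invocation such as
 *
 *	PAGEFLAG(Dirty, dirty, PF_HEAD)
 *
 * expands to roughly the following accessors:
 *
 *	bool folio_test_dirty(const struct folio *folio);
 *	void folio_set_dirty(struct folio *folio);
 *	void folio_clear_dirty(struct folio *folio);
 *	int PageDirty(const struct page *page);
 *	void SetPageDirty(struct page *page);
 *	void ClearPageDirty(struct page *page);
 *
 * The double-underscore variants use the non-atomic __set_bit() and
 * __clear_bit() and are only safe when the caller already excludes all
 * concurrent updates of the flags word.
 */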

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif
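
/*
 * Illustrative sketch, assuming linux/highmem.h: a highmem folio has no
 * permanent kernel mapping, so its contents must be accessed through a
 * temporary mapping. kmap_local_folio() does the right thing whether or
 * not folio_test_highmem() is true:
 *
 *	void *kaddr = kmap_local_folio(folio, 0);
 *
 *	memset(kaddr, 0, PAGE_SIZE);
 *	kunmap_local(kaddr);
 *
 * On !CONFIG_HIGHMEM configurations this degenerates to simple address
 * arithmetic.
 */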

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
		test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(const struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
	TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, in which case folio->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the folio->mapping does not exist as such, nor do
 * these flags below. So in order to avoid testing non-existent bits,
 * please make sure that folio_test_slab(folio) actually evaluates to
 * false before calling the following functions (e.g., folio_test_anon).
 * See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Different from the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is now under reflink case.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)
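
/*
 * Illustrative sketch, open-coded only for explanation: the low two bits
 * of folio->mapping encode what the remaining bits point to, so decoding
 * is a mask-and-compare on the pointer value:
 *
 *	unsigned long m = (unsigned long)folio->mapping;
 *
 *	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(m & ~PAGE_MAPPING_FLAGS);
 *
 * The helpers below (folio_test_anon() and friends) are the supported way
 * to perform these tests.
 */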

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held. For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held. It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
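
/*
 * Illustrative sketch of the intended caller: folio_unlock() in
 * mm/filemap.c clears PG_locked and learns from the same atomic operation
 * whether anybody is waiting, which works because PG_waiters sits in the
 * same byte as PG_locked. folio_wake_bit() is internal to mm/filemap.c
 * and is named here only for explanation:
 *
 *	void folio_unlock(struct folio *folio)
 *	{
 *		if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *			folio_wake_bit(folio, PG_locked);
 *	}
 *
 * This saves a second read of folio->flags on the unlock fast path.
 */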

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes
	 * true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
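
/*
 * Illustrative sketch of the barrier pairing above, as a hypothetical
 * read-completion handler: a producer must fully populate the folio
 * before marking it uptodate, and consumers may rely on the data only
 * after folio_test_uptodate() has returned true:
 *
 *	static void my_read_done(struct folio *folio, int err)
 *	{
 *		if (!err)
 *			folio_mark_uptodate(folio);
 *		folio_unlock(folio);
 *	}
 *
 * __folio_mark_uptodate() is reserved for folios not yet visible to any
 * other thread, e.g. freshly allocated ones.
 */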

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler. Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that are never mapped to userspace, page_type may be used.
 * Because it is initialised to -1, we invert the sense of the bit,
 * so __SetPageFoo *clears* the bit used for PageFoo, and __ClearPageFoo
 * *sets* the bit used for PageFoo. We reserve a few high and low bits
 * so that an underflow or overflow of _mapcount won't be mistaken for
 * a page type value.
 */

enum pagetype {
	PG_buddy	= 0x40000000,
	PG_offline	= 0x20000000,
	PG_table	= 0x10000000,
	PG_guard	= 0x08000000,
	PG_hugetlb	= 0x04000000,
	PG_slab		= 0x02000000,
	PG_zsmalloc	= 0x01000000,
	PG_unaccepted	= 0x00800000,

	PAGE_TYPE_BASE	= 0x80000000,

	/*
	 * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and
	 * allow owners that set a type to reuse the lower 16 bit for their
	 * own purposes.
	 */
	PAGE_MAPCOUNT_RESERVE	= ~0x0000ffff,
};
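
/*
 * Illustrative worked example of the inverted encoding, using the values
 * defined above: a freshly initialised page has page_type == 0xffffffff.
 * Marking it as a buddy page *clears* PG_buddy:
 *
 *	0xffffffff & ~0x40000000 == 0xbfffffff
 *
 * PageType(page, PG_buddy) then checks that PAGE_TYPE_BASE (bit 31) is
 * still set while the PG_buddy bit is clear:
 *
 *	(0xbfffffff & (0x80000000 | 0x40000000)) == 0x80000000
 *
 * A _mapcount holding a real mapcount (>= 0) has bit 31 clear, so it can
 * never be mistaken for a page type.
 */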

#define PageType(page, flag)						\
	((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(const struct page *page)
{
	return page_type_has_type(READ_ONCE(page->page_type));
}

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return folio_test_type(folio, PG_##lname);			\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type |= PG_##lname;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline(). generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free (unmanaged)
 * pages, allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require not giving them to the buddy via generic_online_page().
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)
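
/*
 * Illustrative sketch of a hypothetical PFN walker following the rule
 * above; pfn_to_online_page() is assumed from linux/memory_hotplug.h and
 * read_page_contents() stands in for whatever the walker does:
 *
 *	page_offline_freeze();
 *	for (pfn = start; pfn < end; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page || PageOffline(page))
 *			continue;
 *		read_page_contents(page);
 *	}
 *	page_offline_thaw();
 */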

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/*
 * Mark pages that have to be accepted before being touched for the first
 * time.
 *
 * Serialized with zone lock.
 */
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page.
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_active	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent reuse of the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */	| 1UL << PG_has_hwpoisoned |		\
	 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(const struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(const struct folio *folio)
{
	return page_has_private(&folio->page);
}
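
/*
 * Illustrative sketch, simplified from what truncation-style paths do:
 * before dropping a pagecache folio, private filesystem state must be
 * released, typically via the filesystem's release_folio() as invoked by
 * filemap_release_folio() (declared in linux/pagemap.h):
 *
 *	if (folio_has_private(folio) && !filemap_release_folio(folio, 0))
 *		return false;
 *
 * A false return means the filesystem still needs its private data and
 * the folio cannot be freed yet.
 */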

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */