/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
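
/*
 * Illustrative sketch (not part of the kernel API): the PG_locked/
 * PG_uptodate lifecycle described above, as a hypothetical read-completion
 * handler.  folio_mark_uptodate() is defined later in this header;
 * folio_unlock() is declared in linux/pagemap.h; my_read_end_io() is an
 * invented name.
 *
 *	static void my_read_end_io(struct folio *folio, int err)
 *	{
 *		if (!err)
 *			folio_mark_uptodate(folio); // every byte now valid
 *		folio_unlock(folio);	// clears PG_locked, wakes waiters
 *	}
 */
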
/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_owner_2,		/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
	PG_dropbehind,		/* drop pages on IO completion */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	PG_arch_2,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/* Anonymous memory (and shmem) */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
	/* Some filesystems */
	PG_checked = PG_owner_priv_1,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_owner_2,

	/*
	 * Set if all buffer heads in the folio are mapped.
	 * Filesystems which do not use BHs can use it for their own purpose.
	 */
	PG_mappedtodisk = PG_owner_2,

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.  Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_active,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
	PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
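
/*
 * Illustrative sketch (not from the original header): PAGEFLAGS_MASK
 * covers only the low "FLAGS" area of the word, so it can strip the
 * zone/node/section fields from a raw flags value:
 *
 *	unsigned long flag_bits = READ_ONCE(page->flags) & PAGEFLAGS_MASK;
 *
 *	if (flag_bits & (1UL << PG_dirty))
 *		...	// prefer PageDirty()/folio_test_dirty() instead
 */
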
#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page,
 * otherwise return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of the @page[1], and can avoid touching
	 * a (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static __always_inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
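
/*
 * Illustrative sketch (not from the original header): compound_head is a
 * tagged pointer - bit 0 set means "tail page, the rest of the word points
 * at the head", which is exactly what _compound_head() decodes:
 *
 *	struct page *head = compound_head(page);	// works on any page
 *
 *	if (PageTail(page))
 *		...	// head != page, page was a tail page
 */
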
/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *	check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *	the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *	for compound pages, all operations related to the page flag are
 *	applied to the head page.
 *
 * PF_NO_TAIL:
 *	modifications of the page flag must be done on small or head pages,
 *	checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *	the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *	the page flag is stored in the first tail page.
 */
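
/*
 * Illustrative example (not from the original header): the policy decides
 * which struct page the bit lives in and what is legal.  PageDirty() below
 * uses PF_HEAD, so on a tail page it is redirected to the head page, while
 * __SetPageLocked() uses PF_NO_TAIL and would trip the debug check:
 *
 *	SetPageDirty(tail);	// acts on compound_head(tail)
 *	__SetPageLocked(tail);	// VM_BUG_ON_PGFLAGS with
 *				// CONFIG_DEBUG_VM_PGFLAGS
 */
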
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
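
/*
 * Illustrative expansion (not from the original header): e.g.
 * TESTPAGEFLAG(Dirty, dirty, PF_HEAD) generates folio_test_dirty() plus
 *
 *	static __always_inline int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 * i.e. the bit is always tested on the compound head.
 */
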
#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
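
/*
 * Illustrative sketch (not from the original header): the TESTSCFLAG
 * variants return the old value atomically, which allows "first to set
 * wins" patterns such as:
 *
 *	if (!folio_test_set_dirty(folio))
 *		...	// we transitioned the folio from clean to dirty
 */
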
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)

/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)

FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)
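
/*
 * Illustrative sketch (not from the original header): PG_readahead
 * aliases PG_reclaim, so the bit is only meaningful as readahead state
 * on folios that are not under writeback, e.g.:
 *
 *	if (!folio_test_writeback(folio) &&
 *	    folio_test_clear_readahead(folio))
 *		...	// treat as readahead marker, not reclaim hint
 */
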
#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
		test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(swapcache)
#endif

FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)

#ifdef CONFIG_MMU
FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(mlocked)
	__FOLIO_CLEAR_FLAG_NOOP(mlocked)
	FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
	FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; folio->mapping then points to a struct movable_operations.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal state, folio->mapping does not exist as such, nor do these
 * flags below.  So in order to avoid testing non-existent bits, please
 * make sure that folio_test_slab(folio) actually evaluates to false
 * before calling the following functions (e.g., folio_test_anon()).
 * See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
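
/*
 * Illustrative sketch (not from the original header): decoding the tag
 * bits stored in the low bits of folio->mapping; this mirrors
 * folio_test_anon()/folio_test_ksm() defined below:
 *
 *	unsigned long m = (unsigned long)folio->mapping;
 *
 *	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		...	// anon_vma pointer (not KSM)
 *	else if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM)
 *		...	// KSM private stable-tree structure
 */
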
/*
 * Unlike the flags above, this flag is used only for fsdax mode.  It
 * indicates that the page->mapping is now shared via reflink.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnonNotKsm(const struct page *page)
{
	unsigned long flags = (unsigned long)page_folio(page)->mapping;

	return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
FOLIO_TEST_FLAG_FALSE(ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held.  For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held.  It can also only be used on
 * flags which are in the range 0-6 as some of the implementations only
 * affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
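
/*
 * Illustrative sketch (not from the original header): this is the kind of
 * primitive folio_unlock() is built on - clear PG_locked and, in the same
 * atomic operation, learn whether PG_waiters is set.  folio_wake_bit() is
 * a hypothetical stand-in for the waker in mm/filemap.c:
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		folio_wake_bit(folio, PG_locked);
 */
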
/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes
	 * true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
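
/*
 * Illustrative barrier pairing (not from the original header): the
 * smp_wmb() in folio_mark_uptodate() pairs with the smp_rmb() in
 * folio_test_uptodate(), so a reader that sees the bit also sees the data:
 *
 *	writer				reader
 *	------				------
 *	fill folio with data		if (folio_test_uptodate(folio))
 *	folio_mark_uptodate(folio)		read the folio data safely
 */
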
void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
FOLIO_FLAG_FALSE(partially_mapped)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() should only
 * be called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
#endif
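
/*
 * Illustrative sketch (not from the original header): since the Trans*
 * helpers are also true for hugetlb pages, callers that may see both
 * typically rule out hugetlb first:
 *
 *	if (!PageHuge(page) && PageTransCompound(page))
 *		...	// compound, and known not to be hugetlbfs
 */
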
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(has_hwpoisoned)
#endif

/*
 * For pages that do not use mapcount, page_type may be used.
 * The low 24 bits of page_type may be used for your own purposes, as long
 * as you are careful not to affect the top 8 bits.  The low bits of
 * page_type will be overwritten when you clear the page_type from the page.
 */
enum pagetype {
	/* 0x00-0x7f are positive numbers, ie mapcount */
	/* Reserve 0x80-0xef for mapcount overflow. */
	PGTY_buddy	= 0xf0,
	PGTY_offline	= 0xf1,
	PGTY_table	= 0xf2,
	PGTY_guard	= 0xf3,
	PGTY_hugetlb	= 0xf4,
	PGTY_slab	= 0xf5,
	PGTY_zsmalloc	= 0xf6,
	PGTY_unaccepted	= 0xf7,

	PGTY_mapcount_underflow = 0xff
};

static inline bool page_type_has_type(int page_type)
{
	return page_type < (PGTY_mapcount_underflow << 24);
}

/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_mapcount_is_type(unsigned int mapcount)
{
	return page_type_has_type(mapcount - 1);
}

static inline bool page_has_type(const struct page *page)
{
	return page_mapcount_is_type(data_race(page->page_type));
}

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return data_race(folio->page.page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	if (folio_test_##fname(folio))					\
		return;							\
	VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX,	\
			folio);						\
	folio->page.page_type = (unsigned int)PGTY_##lname << 24;	\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	if (folio->page.page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type = UINT_MAX;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return data_race(page->page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	if (Page##uname(page))						\
		return;							\
	VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page);	\
	page->page_type = (unsigned int)PGTY_##lname << 24;		\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	if (page->page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type = UINT_MAX;					\
}
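
/*
 * Illustrative example (not from the original header): the type lives in
 * the top byte of page_type, with UINT_MAX meaning "no type".  For a page
 * table page, for instance:
 *
 *	__SetPageTable(page);
 *	// page->page_type == (unsigned int)PGTY_table << 24
 *	// PageTable(page) and page_has_type(page) are now true
 *	__ClearPageTable(page);
 *	// page->page_type == UINT_MAX again
 */
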
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale.  Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline().  generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE.  When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free (unmanaged) pages, allowing the containing memory block to get
 * offlined.  A driver that relies on this feature is aware that re-onlining
 * the memory block will require not giving them to the buddy via
 * generic_online_page().
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content.  PFN walkers that read content of
 * random pages should check PageOffline() and synchronize with such drivers
 * using page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/*
 * Marks pages that have to be accepted before they can be touched for the
 * first time.
 *
 * Serialized with zone lock.
 */
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}
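
/*
 * Illustrative sketch (not from the original header): per the
 * folio->mapping comment earlier, rule out slab before testing any
 * mapping-based flag on an arbitrary page:
 *
 *	struct folio *folio = page_folio(page);
 *
 *	if (!folio_test_slab(folio) && folio_test_anon(folio))
 *		...	// safe: the mapping tag bits are meaningful here
 */
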
/*
 * Check if a page is currently marked HWPoisoned.  Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page.
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_active	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_large_rmappable	| 1UL << PG_partially_mapped)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
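
/*
 * Illustrative sketch (not from the original header): the allocator's
 * free-time sanity check boils down to a mask test; bad_page() here is a
 * hypothetical stand-in for the reporting path in mm/page_alloc.c:
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);
 */
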
/**
 * folio_has_private - Determine if folio has private stuff
 * @folio: The folio to be checked
 *
 * Determine if a folio has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int folio_has_private(const struct folio *folio)
{
	return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */