/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
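
/*
 * Illustrative sketch (not part of the original header): the PG_locked /
 * PG_uptodate lifecycle described above, as a simplified read-completion
 * path. The function name is hypothetical; folio_unlock() is declared in
 * <linux/pagemap.h>, and folio_mark_uptodate() later in this file.
 */
#if 0
static void example_folio_read_done(struct folio *folio, bool success)
{
	/* PG_locked was set before the read was submitted. */
	if (success)
		folio_mark_uptodate(folio);	/* data is now valid */
	folio_unlock(folio);		/* read _completed_: drop PG_locked */
}
#endif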

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_error,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
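
/*
 * Illustrative sketch (not part of the original header): PAGEFLAGS_MASK
 * covers only the low "FLAGS" area of page->flags; the bits above it form
 * the "FIELD" area (zone, node, section) described in the layout comment
 * above. The helper name is hypothetical.
 */
#if 0
static bool example_only_flag_bits_set(unsigned long flags)
{
	return (flags & ~PAGEFLAGS_MASK) == 0;	/* no zone/node/section bits */
}
#endif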

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct if the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of the @page[1]. It can avoid touching
	 * a (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
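
/*
 * Illustrative sketch (not part of the original header): the re-check that
 * the page_folio() kernel-doc asks for when the caller holds no reference.
 * The helper name is hypothetical; folio_try_get()/folio_put() come from
 * <linux/mm.h>.
 */
#if 0
static struct folio *example_pin_folio(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (!folio_try_get(folio))
		return NULL;
	/* The lookup may have raced with a folio split; verify and retry. */
	if (unlikely(folio != page_folio(page))) {
		folio_put(folio);
		return NULL;
	}
	return folio;
}
#endif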

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *	check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *	the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *	for compound pages, all operations related to the page flag are
 *	applied to the head page.
 *
 * PF_ONLY_HEAD:
 *	for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *	modifications of the page flag must be done on small or head pages,
 *	checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *	the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *	the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1
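
/*
 * Illustrative sketch (not part of the original header): what a policy buys
 * you. With PF_HEAD, querying a tail page transparently redirects to the
 * head page, so e.g. PageDirty() (generated below with PF_HEAD) reports the
 * compound page's state for any constituent page. Hypothetical helper.
 */
#if 0
static bool example_tail_sees_head_state(struct page *tail)
{
	return PageDirty(tail) == PageDirty(compound_head(tail));
}
#endif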

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)				\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }	\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
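
/*
 * For illustration (not part of the original header), expanding
 * PAGEFLAG(Dirty, dirty, PF_HEAD) yields, roughly:
 *
 *	bool folio_test_dirty(struct folio *folio)
 *	{ return test_bit(PG_dirty, folio_flags(folio, 0)); }
 *	int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 * plus the matching folio_set_dirty()/SetPageDirty() and
 * folio_clear_dirty()/ClearPageDirty() pairs, where PF_HEAD() resolves the
 * operation to the compound head page.
 */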

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
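
/*
 * Illustrative sketch (not part of the original header): PG_readahead
 * aliases PG_reclaim, so the two meanings are kept read/write-exclusive.
 * A hypothetical writeback-side user clears the readahead marker before
 * reusing the bit:
 */
#if 0
static void example_reuse_reclaim_bit(struct folio *folio)
{
	if (folio_test_clear_readahead(folio))
		;	/* bit was the read-side marker; now free */
	folio_set_reclaim(folio);	/* same physical bit, write-side meaning */
}
#endif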

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif
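
/*
 * Illustrative sketch (not part of the original header): PG_swapcache
 * aliases PG_owner_priv_1, so the bit alone is ambiguous;
 * folio_test_swapcache() above therefore insists on PG_swapbacked as well.
 * Hypothetical helper expressing that invariant:
 */
#if 0
static bool example_swapcache_implies_swapbacked(struct folio *folio)
{
	return !folio_test_swapcache(folio) || folio_test_swapbacked(folio);
}
#endif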

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Different from the flags above, this flag is used only for fsdax mode.  It
 * indicates that this page->mapping is now shared due to a reflink operation.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);
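
/*
 * Illustrative sketch (not part of the original header): decoding the low
 * bits of folio->mapping per the comment above. Hypothetical helper; real
 * users go through folio_test_anon()/folio_test_ksm()/__folio_test_movable().
 */
#if 0
static const char *example_classify_mapping(struct folio *folio)
{
	unsigned long m = (unsigned long)folio->mapping;

	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM)
		return "KSM stable-tree node";		/* see ksm.h */
	if (m & PAGE_MAPPING_ANON)
		return "anon_vma";			/* see rmap.h */
	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE)
		return "movable_operations";
	return "address_space (or NULL)";
}
#endif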

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
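
/*
 * Illustrative sketch (not part of the original header): how the smp_wmb()
 * in folio_mark_uptodate() pairs with the smp_rmb() in folio_test_uptodate().
 * Hypothetical producer/consumer pair:
 */
#if 0
/* Producer (I/O completion): publish the data, then the flag. */
static void example_io_done(struct folio *folio)
{
	/* ...stores filling the folio with data... */
	folio_mark_uptodate(folio);	/* smp_wmb() orders data before flag */
}

/* Consumer: the flag test implies smp_rmb() when it returns true. */
static bool example_may_copy_out(struct folio *folio)
{
	return folio_test_uptodate(folio);	/* data loads ordered after flag */
}
#endif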

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
	folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve	0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
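
/*
 * Illustrative sketch (not part of the original header): because page_type
 * is initialised to -1, "type is set" means the type's bit is *clear* while
 * the PAGE_TYPE_BASE bits are intact. Hypothetical helper using the
 * PageBuddy ops generated just below:
 */
#if 0
static void example_page_type_inversion(struct page *page)
{
	__SetPageBuddy(page);			/* page_type &= ~PG_buddy */
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	__ClearPageBuddy(page);			/* page_type |= PG_buddy */
}
#endif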

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require marking the pages PageOffline() again instead of giving
 * them to the buddy via online_page_callback_t; see the sketch after these
 * declarations.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);
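
/*
 * Illustrative sketch (not part of the original header): the reference-count
 * dance a driver could do from its memory notifier to let unmovable
 * PageOffline() pages be offlined, as described above. Hypothetical
 * callback; MEM_GOING_OFFLINE/MEM_CANCEL_OFFLINE come from <linux/memory.h>.
 */
#if 0
static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *arg)
{
	switch (action) {
	case MEM_GOING_OFFLINE:
		/* put_page() each PageOffline() page so it looks free. */
		break;
	case MEM_CANCEL_OFFLINE:
		/* Offlining failed: get_page() them back. */
		break;
	}
	return NOTIFY_OK;
}
#endif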

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */