/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
        PG_workingset,
        PG_waiters,             /* Page has waiters, check its waitqueue.
                                 * Must be bit #7 and in the same byte as
                                 * "PG_locked" */
        PG_error,
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
        PG_arch_1,
        PG_reserved,
        PG_private,             /* If pagecache, has fs-private data */
        PG_private_2,           /* If pagecache, has fs aux data */
        PG_writeback,           /* Page is under writeback */
        PG_head,                /* A head page */
        PG_mappedtodisk,        /* Has blocks allocated on-disk */
        PG_reclaim,             /* To be reclaimed asap */
        PG_swapbacked,          /* Page is backed by RAM/swap */
        PG_unevictable,         /* Page is "unevictable" */
#ifdef CONFIG_MMU
        PG_mlocked,             /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        PG_uncached,            /* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
        PG_hwpoison,            /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
#ifdef CONFIG_64BIT
        PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
        PG_skip_kasan_poison,
#endif
        __NR_PAGEFLAGS,

        PG_readahead = PG_reclaim,

        /*
         * Depending on the way an anonymous folio can be mapped into a page
         * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
         * THP), PG_anon_exclusive may be set only for the head page or for
         * tail pages of an anonymous folio. For now, we only expect it to be
         * set on tail pages for PTE-mapped THP.
         */
        PG_anon_exclusive = PG_mappedtodisk,

        /* Filesystems */
        PG_checked = PG_owner_priv_1,

        /* SwapBacked */
        PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */

        /*
         * Two page bits are conscripted by FS-Cache to maintain local caching
         * state.  These bits are set on pages belonging to the netfs's inodes
         * when those inodes are being locally cached.
         */
        PG_fscache = PG_private_2,      /* page backed by cache */

        /* XEN */
        /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
        /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,
        /* Remapped by swiotlb-xen. */
        PG_xen_remapped = PG_owner_priv_1,

        /* SLOB */
        PG_slob_free = PG_private,

        /* Compound pages. Stored in first tail page's flags */
        PG_double_map = PG_workingset,

#ifdef CONFIG_MEMORY_FAILURE
        /*
         * Compound pages. Stored in first tail page's flags.
         * Indicates that at least one subpage is hwpoisoned in the
         * THP.
         */
        PG_has_hwpoisoned = PG_error,
#endif

        /* non-lru isolated movable page */
        PG_isolated = PG_reclaim,

        /* Only valid for buddy pages. Used to track pages that are reported */
        PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
        /* For self-hosted memmap pages */
        PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
};

#define PAGEFLAGS_MASK          ((1UL << NR_PAGEFLAGS) - 1)
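
/*
 * Illustrative sketch, not part of the kernel API: with the layout above,
 * flag bits live in the low part of page->flags and could in principle be
 * tested directly, e.g.:
 *
 *      bool dirty = page->flags & (1UL << PG_dirty);
 *      unsigned long flag_bits = page->flags & PAGEFLAGS_MASK;
 *
 * Real code should use the PageFoo()/folio_test_foo() accessors generated
 * further down, which also apply the correct compound-page policy and
 * poison checks.  Note that the aliased bits above (e.g. PG_readahead ==
 * PG_reclaim) are only disambiguated by context.
 */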

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
                         hugetlb_optimize_vmemmap_key);

static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
{
        return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
                                   &hugetlb_optimize_vmemmap_key);
}

/*
 * If the feature of optimizing vmemmap pages associated with each HugeTLB
 * page is enabled, the head vmemmap page frame is reused and all of the tail
 * vmemmap addresses map to the head vmemmap page frame (further details can
 * be found in the figure at the head of mm/hugetlb_vmemmap.c). In other
 * words, there is more than one page struct with PG_head associated with
 * each HugeTLB page. We __know__ that there is only one head page struct;
 * the tail page structs with PG_head are fake head page structs. We need an
 * approach to distinguish between those two different types of page structs
 * so that compound_head() can return the real head page struct when the
 * parameter is a tail page struct with PG_head.
 *
 * page_fixed_fake_head() returns the real head page struct if @page is a
 * fake head page, otherwise it returns @page, which can be either a true
 * head page or a tail page.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
        if (!hugetlb_optimize_vmemmap_enabled())
                return page;

        /*
         * Only struct pages at PAGE_SIZE-aligned addresses can be fake head
         * struct pages. The alignment check avoids accessing the fields of
         * @page[1] (e.g. compound_head) when @page cannot be a fake head,
         * which in turn avoids touching a (possibly) cold cacheline in some
         * cases.
         */
        if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
            test_bit(PG_head, &page->flags)) {
                /*
                 * We can safely access the fields of @page[1]: with PG_head
                 * set, @page belongs to a compound page composed of at least
                 * two contiguous pages.
                 */
                unsigned long head = READ_ONCE(page[1].compound_head);

                if (likely(head & 1))
                        return (const struct page *)(head - 1);
        }
        return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
        return page;
}

static inline bool hugetlb_optimize_vmemmap_enabled(void)
{
        return false;
}
#endif
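
/*
 * Illustrative sketch of the encoding used by the helpers below:
 * page->compound_head is a tagged pointer.  For a tail page it holds the
 * address of the head page with bit 0 set; for other pages bit 0 is clear.
 * Decoding it by hand would look like:
 *
 *      unsigned long head = READ_ONCE(page->compound_head);
 *
 *      if (head & 1)
 *              page = (struct page *)(head - 1);  // tail: strip the tag
 *      // otherwise @page is not a tail page
 *
 * _compound_head() below implements exactly this, plus the fake-head fixup
 * required by the HugeTLB vmemmap optimization above; set_compound_head()
 * further down stores the encoded value.
 */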

static __always_inline int page_is_fake_head(struct page *page)
{
        return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
        unsigned long head = READ_ONCE(page->compound_head);

        if (unlikely(head & 1))
                return head - 1;
        return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)     ((typeof(page))_compound_head(page))

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)           (_Generic((p),                          \
        const struct page *:    (const struct folio *)_compound_head(p), \
        struct page *:          (struct folio *)_compound_head(p)))
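
/*
 * Illustrative usage sketch for the Context note above (folio_try_get()
 * and folio_put() are assumed from elsewhere in the mm API): a caller
 * without a reference must re-check the association after acquiring one,
 * since it may race with a folio split:
 *
 *      struct folio *folio = page_folio(page);
 *
 *      if (!folio_try_get(folio))
 *              return NULL;                    // folio was being freed
 *      if (unlikely(folio != page_folio(page))) {
 *              folio_put(folio);               // raced with a split; retry
 *              goto retry;
 *      }
 */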

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)    nth_page(&(folio)->page, n)

static __always_inline int PageTail(struct page *page)
{
        return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
        return test_bit(PG_head, &page->flags) ||
               READ_ONCE(page->compound_head) & 1;
}

#define PAGE_POISON_PATTERN     -1l
static inline int PagePoisoned(const struct page *page)
{
        return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
        struct page *page = &folio->page;

        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
        return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({                                      \
                VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
                page; })
#define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({                                  \
                VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
                PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({                                    \
                VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
                PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({                                \
                VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
                PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({                                     \
                VM_BUG_ON_PGFLAGS(!PageHead(page), page);               \
                PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY            0
#define FOLIO_PF_HEAD           0
#define FOLIO_PF_ONLY_HEAD      0
#define FOLIO_PF_NO_TAIL        0
#define FOLIO_PF_NO_COMPOUND    0
#define FOLIO_PF_SECOND         1

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)                              \
static __always_inline bool folio_test_##lname(struct folio *folio)    \
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }   \
static __always_inline int Page##uname(struct page *page)              \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)                               \
static __always_inline                                                  \
void folio_set_##lname(struct folio *folio)                             \
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }            \
static __always_inline void SetPage##uname(struct page *page)           \
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)                             \
static __always_inline                                                  \
void folio_clear_##lname(struct folio *folio)                           \
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }          \
static __always_inline void ClearPage##uname(struct page *page)         \
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)                             \
static __always_inline                                                  \
void __folio_set_##lname(struct folio *folio)                           \
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }          \
static __always_inline void __SetPage##uname(struct page *page)         \
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)                           \
static __always_inline                                                  \
void __folio_clear_##lname(struct folio *folio)                         \
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }        \
static __always_inline void __ClearPage##uname(struct page *page)       \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)                               \
static __always_inline                                                  \
bool folio_test_set_##lname(struct folio *folio)                        \
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)        \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)                             \
static __always_inline                                                  \
bool folio_test_clear_##lname(struct folio *folio)                      \
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)      \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
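
/*
 * For illustration, TESTPAGEFLAG(Foo, foo, PF_HEAD) for a hypothetical
 * flag "foo" expands to:
 *
 *      static __always_inline bool folio_test_foo(struct folio *folio)
 *      { return test_bit(PG_foo, folio_flags(folio, FOLIO_PF_HEAD)); }
 *      static __always_inline int PageFoo(struct page *page)
 *      { return test_bit(PG_foo, &PF_HEAD(page, 0)->flags); }
 *
 * i.e. the page variant resolves the policy at the call site (here:
 * redirecting tail pages to the head page), while the folio variant only
 * needs FOLIO_PF_* to pick which page of the folio stores the bit.
 */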

#define PAGEFLAG(uname, lname, policy)                                  \
        TESTPAGEFLAG(uname, lname, policy)                              \
        SETPAGEFLAG(uname, lname, policy)                               \
        CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)                                \
        TESTPAGEFLAG(uname, lname, policy)                              \
        __SETPAGEFLAG(uname, lname, policy)                             \
        __CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)                                \
        TESTSETFLAG(uname, lname, policy)                               \
        TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname, lname)                                \
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)                                  \
static inline void folio_set_##lname(struct folio *folio) { }           \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)                                \
static inline void folio_clear_##lname(struct folio *folio) { }         \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)                              \
static inline void __folio_clear_##lname(struct folio *folio) { }       \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)                                 \
static inline bool folio_test_set_##lname(struct folio *folio)          \
{ return 0; }                                                           \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)                               \
static inline bool folio_test_clear_##lname(struct folio *folio)        \
{ return 0; }                                                           \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)   \
        SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)                                  \
        TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
        __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
        TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
        TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
        TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)      /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
        TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
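
/*
 * Illustrative usage of the accessors generated above; the names follow
 * mechanically from the declarations (here: the Dirty family):
 *
 *      folio_set_dirty(folio);                 // atomic set, PF_HEAD policy
 *      if (folio_test_clear_dirty(folio))
 *              writeback_it(folio);            // hypothetical callee
 *      __ClearPageDirty(page);                 // non-atomic: caller must
 *                                              // exclude concurrent updates
 */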

/*
 * Private page markings that may be used by the filesystem that owns the
 * page for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
        TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
        TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
        TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
        TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
        return folio_test_swapbacked(folio) &&
                        test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
        return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
        __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
        TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
        TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
        TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON  0x48575053U     /* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif
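
/*
 * Note on the *_FALSE/*_NOOP stubs used in the #else branches above: they
 * let callers test flags without #ifdefs.  When a flag is compiled out,
 * the test is constant-false and the compiler drops the branch entirely,
 * e.g. (illustrative, handle_poison() is hypothetical):
 *
 *      if (PageHWPoison(page))         // always 0 if !CONFIG_MEMORY_FAILURE
 *              handle_poison(page);
 */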

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page; page->mapping then points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON       0x1
#define PAGE_MAPPING_MOVABLE    0x2
#define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Unlike the flags above, this flag is used only for fsdax mode.  It
 * indicates that this page->mapping is now under reflink case.
 */
#define PAGE_MAPPING_DAX_COW    0x1

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
        return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_MOVABLE;
}
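
/*
 * Illustrative decode of the page->mapping tag bits described above (the
 * KSM case must be tested first, since it includes PAGE_MAPPING_ANON):
 *
 *      unsigned long m = (unsigned long)folio->mapping;
 *
 *      if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM)
 *              // KSM: points to the page's stable-tree node (see ksm.h)
 *      else if (m & PAGE_MAPPING_ANON)
 *              // anonymous: points to an anon_vma (see rmap.h)
 *      else if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE)
 *              // non-LRU movable: points to struct movable_operations
 *      else
 *              // pagecache: plain pointer to a struct address_space
 */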

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
        return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
        bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
        /*
         * Must ensure that the data we read out of the folio is loaded
         * _after_ we've loaded folio->flags to check the uptodate bit.
         * We can skip the barrier if the folio is not uptodate, because
         * we wouldn't be reading anything from it.
         *
         * See folio_mark_uptodate() for the other side of the story.
         */
        if (ret)
                smp_rmb();

        return ret;
}

static inline int PageUptodate(struct page *page)
{
        return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
        smp_wmb();
        __set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
        /*
         * Memory barrier must be issued before setting the PG_uptodate bit,
         * so that all previous stores issued in order to bring the folio
         * uptodate are actually visible before folio_test_uptodate becomes
         * true.
         */
        smp_wmb();
        set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
        __folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
        folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
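
/*
 * Illustrative pairing of the barriers above (use() is a stand-in for any
 * consumer of the folio contents):
 *
 *      // writer
 *      memcpy(folio_address(folio), src, len); // fill the folio
 *      folio_mark_uptodate(folio);             // smp_wmb(), then set_bit()
 *
 *      // reader
 *      if (folio_test_uptodate(folio))         // test_bit(), then smp_rmb()
 *              use(folio_address(folio));      // reads ordered after the flag
 *
 * Without the smp_wmb()/smp_rmb() pair, the reader could observe
 * PG_uptodate set while still seeing stale folio contents.
 */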

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)                    \
        __folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)          \
        __folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
        folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
        return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
        return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
        PF_POISONED_CHECK(page);
        return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
        return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
        WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
        WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
        return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() returns true only for hugetlbfs pages, not for normal or
 * transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and hugetlbfs
 * pages, but not normal pages.  PageTransHuge() can only be called in the
 * core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
        return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
        return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
        return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per-small-page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For such a page, PageDoubleMap means that ->_mapcount in all sub-pages is
 * offset up by one. This reference will go away with the last
 * compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
        TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
PAGEFLAG_FALSE(DoubleMap, double_map)
        TESTSCFLAG_FALSE(DoubleMap, double_map)
#endif
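
/*
 * Illustrative sketch of combining the predicates above when both hugetlb
 * and THP pages may be encountered (PageTransHuge() alone would be unsafe
 * here, as it also returns true for hugetlbfs heads):
 *
 *      struct page *head = compound_head(page);
 *
 *      if (PageHuge(head)) {
 *              // hugetlbfs page
 *      } else if (PageTransHuge(head)) {
 *              // THP head page (hugetlb excluded above)
 *      }
 */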

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler.  Cleared by THP split or free
 * page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
        TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
        TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
        if (PageHWPoison(page))
                return true;
        return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE  0xf0000000
/* Reserve       0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE   -128
#define PG_buddy        0x00000080
#define PG_offline      0x00000100
#define PG_table        0x00000200
#define PG_guard        0x00000400

#define PageType(page, flag)                                            \
        ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
        return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)                                     \
static __always_inline int Page##uname(struct page *page)               \
{                                                                       \
        return PageType(page, PG_##lname);                              \
}                                                                       \
static __always_inline void __SetPage##uname(struct page *page)         \
{                                                                       \
        VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
        page->page_type &= ~PG_##lname;                                 \
}                                                                       \
static __always_inline void __ClearPage##uname(struct page *page)       \
{                                                                       \
        VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
        page->page_type |= PG_##lname;                                  \
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)
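
/*
 * Worked example of the inverted page_type encoding, using the Buddy ops
 * just generated: page_type starts out as -1 (0xffffffff), so
 * __SetPageBuddy() *clears* PG_buddy:
 *
 *      0xffffffff & ~0x00000080 == 0xffffff7f
 *
 * PageBuddy() then evaluates
 *
 *      (0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *      (0xffffff7f & 0xf0000080) == 0xf0000000         // true
 *
 * whereas on a normal page (0xffffffff) the PG_buddy bit is still set and
 * the comparison fails.
 */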

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined. A
 * driver that relies on this feature is aware that re-onlining the memory
 * block will require it to re-set the pages PageOffline() and to not give
 * them to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of
 * random pages should check PageOffline() and synchronize with such drivers
 * using page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED            (1UL << PG_mlocked)
#else
#define __PG_MLOCKED            0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE                                \
        (1UL << PG_lru          | 1UL << PG_locked      |       \
         1UL << PG_private      | 1UL << PG_private_2   |       \
         1UL << PG_writeback    | 1UL << PG_reserved    |       \
         1UL << PG_slab         | 1UL << PG_active      |       \
         1UL << PG_unevictable  | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP        \
        (PAGEFLAGS_MASK & ~__PG_HWPOISON)
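
/*
 * Illustrative sketch of how these masks are applied, mirroring the sanity
 * checks done in mm/page_alloc.c (the reporting helper is elided here):
 *
 *      // on free: none of these flags may still be set
 *      if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *              // report a bad page and refuse to free it
 *
 *      // on allocation: the flags word must be clean, except that
 *      // PG_hwpoison may legitimately survive an alloc-free cycle
 *      if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP))
 *              // report struct page corruption
 */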

#define PAGE_FLAGS_PRIVATE                              \
        (1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
        return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
        return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */