/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing IO on the pages.  The
 * struct page itself (which carries these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
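/*
 * Illustrative note (editorial, not part of the original header): the
 * fields packed into the high end of page->flags are read back with a
 * shift and a mask, while the flag bits below NR_PAGEFLAGS are accessed
 * through the bit-operation wrappers defined further down.  For example,
 * page_zonenum() in <linux/mm.h> is roughly
 *
 *	static inline enum zone_type page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 *
 * with ZONES_PGSHIFT and ZONES_MASK derived from the layout described
 * above and the generated bounds.
 */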
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,
};

#ifndef __GENERATING_BOUNDS_H

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname) \
static inline int Page##uname(const struct page *page) \
			{ return test_bit(PG_##lname, &page->flags); }

#define SETPAGEFLAG(uname, lname) \
static inline void SetPage##uname(struct page *page) \
			{ set_bit(PG_##lname, &page->flags); }

#define CLEARPAGEFLAG(uname, lname) \
static inline void ClearPage##uname(struct page *page) \
			{ clear_bit(PG_##lname, &page->flags); }

#define __SETPAGEFLAG(uname, lname) \
static inline void __SetPage##uname(struct page *page) \
			{ __set_bit(PG_##lname, &page->flags); }

#define __CLEARPAGEFLAG(uname, lname) \
static inline void __ClearPage##uname(struct page *page) \
			{ __clear_bit(PG_##lname, &page->flags); }

#define TESTSETFLAG(uname, lname) \
static inline int TestSetPage##uname(struct page *page) \
		{ return test_and_set_bit(PG_##lname, &page->flags); }

#define TESTCLEARFLAG(uname, lname) \
static inline int TestClearPage##uname(struct page *page) \
		{ return test_and_clear_bit(PG_##lname, &page->flags); }

#define __TESTCLEARFLAG(uname, lname) \
static inline int __TestClearPage##uname(struct page *page) \
		{ return __test_and_clear_bit(PG_##lname, &page->flags); }

#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)

#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

#define TESTSCFLAG(uname, lname) \
	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
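/*
 * Illustrative expansion (editorial, not part of the original header):
 * PAGEFLAG(Dirty, dirty), declared further down, expands to the three
 * trivial wrappers
 *
 *	static inline int PageDirty(const struct page *page)
 *			{ return test_bit(PG_dirty, &page->flags); }
 *	static inline void SetPageDirty(struct page *page)
 *			{ set_bit(PG_dirty, &page->flags); }
 *	static inline void ClearPageDirty(struct page *page)
 *			{ clear_bit(PG_dirty, &page->flags); }
 *
 * The Set/Clear forms use atomic bit operations; the __SetPageDirty and
 * __ClearPageDirty variants generated by __SETPAGEFLAG/__CLEARPAGEFLAG
 * use the non-atomic __set_bit/__clear_bit and are only safe when nothing
 * else can be touching the same page->flags word concurrently.
 */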
#define TESTPAGEFLAG_FALSE(uname) \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname) \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname) \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname) \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define __TESTCLEARFLAG_FALSE(uname) \
static inline int __TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname) \
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

struct page;	/* forward declaration */

TESTPAGEFLAG(Locked, locked)
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
	__SETPAGEFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
	TESTCLEARFLAG(Active, active)
__PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked)		/* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
PAGEFLAG(SavePinned, savepinned);			/* Xen */
PAGEFLAG(Foreign, foreign);				/* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
	__SETPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
	__CLEARPAGEFLAG(Private, private)
PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators
 * are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
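/*
 * Illustrative expansion (editorial, not part of the original header):
 * when a flag is compiled out, the *_FALSE/*_NOOP variants above keep
 * callers building without #ifdefs.  E.g. PAGEFLAG_FALSE(HighMem), used
 * below for !CONFIG_HIGHMEM, expands to
 *
 *	static inline int PageHighMem(const struct page *page) { return 0; }
 *	static inline void SetPageHighMem(struct page *page) { }
 *	static inline void ClearPageHighMem(struct page *page) { }
 *
 * so the compiler can fold away the dead branches at every call site.
 */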
#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
	TESTCLEARFLAG(Unevictable, unevictable)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)
TESTSCFLAG(HWPoison, hwpoison)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young)
SETPAGEFLAG(Young, young)
TESTCLEARFLAG(Young, young)
PAGEFLAG(Idle, idle)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif
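/*
 * Illustrative sketch (editorial, not part of the original header): the
 * low bits of page->mapping are tag bits, so code wanting the real
 * pointer must mask them off first, roughly:
 *
 *	struct anon_vma *anon = NULL;
 *	unsigned long mapping = (unsigned long)page->mapping;
 *
 *	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon = (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
 *
 * which is essentially what page_anon_vma() does.
 */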
u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &(page)->flags);

	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	smp_wmb();
	__set_bit(PG_uptodate, &(page)->flags);
}

static inline void SetPageUptodate(struct page *page)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &(page)->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)

static inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static inline int PageCompound(struct page *page)
{
	return PageHead(page) || PageTail(page);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1L << PG_head))
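/*
 * Illustrative note (editorial, not part of the original header): the
 * compound_head field doubles as the PageTail marker.  A tail page
 * stores its head pointer with bit 0 set, which is safe because struct
 * page is word-aligned and bit 0 of a real pointer is always clear:
 *
 *	set_compound_head(tail, head);
 *	  tail->compound_head   == (unsigned long)head + 1
 *	  PageTail(tail)        == 1
 *	  compound_head(tail)   == head		(tag stripped)
 *	  compound_head(head)   == head		(bit 0 clear, returned as-is)
 */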
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return 0;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

#else

static inline int PageTransHuge(struct page *page)
{
	return 0;
}

static inline int PageTransCompound(struct page *page)
{
	return 0;
}

static inline int PageTransTail(struct page *page)
{
	return 0;
}
#endif

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}
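/*
 * Illustrative note (editorial, not part of the original header): buddy
 * and balloon pages are never mapped, so their _mapcount (which idles
 * at -1) is free to hold a sentinel value instead of a count.  A
 * hypothetical new page type would follow the same pattern:
 *
 *	#define PAGE_FOO_MAPCOUNT_VALUE (-512)	(hypothetical sentinel)
 *
 *	static inline int PageFoo(struct page *page)
 *	{
 *		return atomic_read(&page->_mapcount) ==
 *					PAGE_FOO_MAPCOUNT_VALUE;
 *	}
 *
 * picking a value far enough from -1 that a mapcount over- or underflow
 * bug is unlikely to collide with it, as the comment above explains
 * for -128.
 */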
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1 << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __PG_COMPOUND_LOCK	(1 << PG_compound_lock)
#else
#define __PG_COMPOUND_LOCK	0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru	 | 1 << PG_locked    | \
	 1 << PG_private | 1 << PG_private_2 | \
	 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
	 1 << PG_unevictable | __PG_MLOCKED | \
	 __PG_COMPOUND_LOCK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1 << PG_private | 1 << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */