/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Macros for manipulating and testing flags related to a
 * pageblock_nr_pages number of pages.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Original author, Mel Gorman
 * Major cleanups and reduction of bit operations, Andy Whitcroft
 */
#ifndef PAGEBLOCK_FLAGS_H
#define PAGEBLOCK_FLAGS_H

#include <linux/types.h>

#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
	PB_migrate,
	PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
			/* 3 bits required for migrate types */
	PB_migrate_skip,/* If set the block is skipped by compaction */

	/*
	 * Assume the bits will always align on a word. If this assumption
	 * changes then get/set pageblock needs updating.
	 */
	NR_PAGEBLOCK_BITS
};
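
/*
 * Sketch (illustrative, not part of this header): the PB_migrate bits
 * are always read and written as a group through a mask that lives
 * next to the accessors in mm/page_alloc.c, roughly:
 *
 *	#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
 *	#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
 *
 * so a pageblock's migratetype occupies the low PB_migratetype_bits of
 * its NR_PAGEBLOCK_BITS-wide slot in the pageblock bitmap.
 */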

#if defined(CONFIG_HUGETLB_PAGE)

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Huge page sizes are variable */
extern unsigned int pageblock_order;

#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * Huge pages are a constant size, but don't exceed the maximum allocation
 * granularity.
 */
#define pageblock_order		MIN_T(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

#elif defined(CONFIG_TRANSPARENT_HUGEPAGE)

#define pageblock_order		MIN_T(unsigned int, HPAGE_PMD_ORDER, MAX_PAGE_ORDER)

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
#define pageblock_order		MAX_PAGE_ORDER

#endif /* CONFIG_HUGETLB_PAGE */

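/*
 * Worked example (illustrative): on x86-64 with CONFIG_HUGETLB_PAGE and
 * 2MiB huge pages, HUGETLB_PAGE_ORDER is 9 and MAX_PAGE_ORDER defaults
 * to 10, so pageblock_order clamps to 9 and pageblock_nr_pages below
 * works out to 512 base pages, i.e. one 2MiB pageblock.
 */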
#define pageblock_nr_pages	(1UL << pageblock_order)
#define pageblock_align(pfn)	ALIGN((pfn), pageblock_nr_pages)
#define pageblock_aligned(pfn)	IS_ALIGNED((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)
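
/*
 * Example (illustrative): pageblock_start_pfn() and pageblock_end_pfn()
 * bracket the pageblock containing @pfn, so a scan of that whole block
 * can be written as:
 *
 *	unsigned long p;
 *
 *	for (p = pageblock_start_pfn(pfn); p < pageblock_end_pfn(pfn); p++)
 *		visit(p);	/- visit() is a hypothetical per-pfn helper -/
 *
 * Note that pageblock_end_pfn() is exclusive: ALIGN((pfn) + 1, ...)
 * returns the first pfn of the next block even when @pfn is already
 * pageblock-aligned.
 */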

/* Forward declaration */
struct page;

unsigned long get_pfnblock_flags_mask(const struct page *page,
				unsigned long pfn,
				unsigned long mask);

void set_pfnblock_flags_mask(struct page *page,
				unsigned long flags,
				unsigned long pfn,
				unsigned long mask);
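
/*
 * Sketch (illustrative): the migratetype accessors elsewhere in the
 * tree are thin wrappers around the two declarations above, along the
 * lines of the definition in include/linux/mmzone.h:
 *
 *	#define get_pageblock_migratetype(page)				\
 *		get_pfnblock_flags_mask(page, page_to_pfn(page),	\
 *				MIGRATETYPE_MASK)
 *
 * where MIGRATETYPE_MASK covers the PB_migrate..PB_migrate_end bits.
 */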

/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page),	\
			(1 << (PB_migrate_skip)))
#define clear_pageblock_skip(page) \
	set_pfnblock_flags_mask(page, 0, page_to_pfn(page),	\
			(1 << PB_migrate_skip))
#define set_pageblock_skip(page) \
	set_pfnblock_flags_mask(page, (1 << PB_migrate_skip),	\
			page_to_pfn(page),			\
			(1 << PB_migrate_skip))
#else
static inline bool get_pageblock_skip(struct page *page)
{
	return false;
}
static inline void clear_pageblock_skip(struct page *page)
{
}
static inline void set_pageblock_skip(struct page *page)
{
}
#endif /* CONFIG_COMPACTION */

#endif	/* PAGEBLOCK_FLAGS_H */