/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_EXT_H
#define __LINUX_PAGE_EXT_H

#include <linux/types.h>
#include <linux/mmzone.h>
#include <linux/stacktrace.h>

struct pglist_data;

#ifdef CONFIG_PAGE_EXTENSION
/**
 * struct page_ext_operations - per page_ext client operations
 * @offset: Offset to the client's data within page_ext. Offset is returned to
 * the client by page_ext_init.
 * @size: The size of the client data within page_ext.
 * @need: Function that returns true if client requires page_ext.
 * @init: (optional) Called to initialize client once page_exts are allocated.
 * @need_shared_flags: True when client is using shared page_ext->flags
 * field.
 *
 * Each Page Extension client must define page_ext_operations in
 * page_ext_ops array.
 */
struct page_ext_operations {
	size_t offset;
	size_t size;
	bool (*need)(void);
	void (*init)(void);
	bool need_shared_flags;
};
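
/*
 * Illustrative sketch of a hypothetical client (the names below are
 * examples only, not part of this header): the client describes its
 * per-page data with a page_ext_operations entry and adds it to the
 * page_ext_ops array; ->offset is filled in once the extensions are
 * allocated.
 *
 *	struct my_ext_data {
 *		unsigned long handle;
 *	};
 *
 *	static bool need_my_ext(void)
 *	{
 *		return true;
 *	}
 *
 *	struct page_ext_operations my_ext_ops = {
 *		.size = sizeof(struct my_ext_data),
 *		.need = need_my_ext,
 *		.need_shared_flags = false,
 *	};
 */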

/*
 * The page_ext_flags users must set need_shared_flags to true.
 */
enum page_ext_flags {
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};

/*
 * Page Extension can be considered as an extended mem_map.
 * A page_ext page is associated with every page descriptor. The
 * page_ext helps us add more information about the page.
 * All page_exts are allocated at boot or on memory hotplug, so the
 * page_ext for a pfn always exists.
 */
struct page_ext {
	unsigned long flags;
};
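
/*
 * Example (sketch): clients that set need_shared_flags operate on the
 * shared ->flags word with the regular bit operations, e.g.:
 *
 *	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 *	...
 *	if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 *		...
 */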

extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);

static inline bool early_page_ext_enabled(void)
{
	return early_page_ext;
}

#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}

static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	/*
	 * page_ext is allocated per memory section. Once we cross a
	 * memory section, we have to fetch the new pointer.
	 */
	return next_pfn % PAGES_PER_SECTION;
}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}

static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	return true;
}
#endif

extern struct page_ext *page_ext_get(const struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
extern struct page_ext *page_ext_lookup(unsigned long pfn);
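
/*
 * Typical lookup pattern (sketch): page_ext_get() returns NULL when no
 * page_ext exists for the page, and each successful lookup must be
 * paired with page_ext_put():
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (!page_ext)
 *		return;
 *	...
 *	page_ext_put(page_ext);
 */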

static inline void *page_ext_data(struct page_ext *page_ext,
				  struct page_ext_operations *ops)
{
	return (void *)(page_ext) + ops->offset;
}

static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
	void *next = curr;
	next += page_ext_size;
	return next;
}
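
/*
 * Example (sketch, reusing the hypothetical my_ext_ops client above):
 * a client finds its private data within a page_ext via page_ext_data()
 * and steps to the next page's page_ext with page_ext_next():
 *
 *	struct my_ext_data *data = page_ext_data(page_ext, &my_ext_ops);
 *
 *	data->handle = 0;
 *	page_ext = page_ext_next(page_ext);
 */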

struct page_ext_iter {
	unsigned long index;
	unsigned long start_pfn;
	struct page_ext *page_ext;
};

/**
 * page_ext_iter_begin() - Prepare for iterating through page extensions.
 * @iter: page extension iterator.
 * @pfn: PFN of the page we're interested in.
 *
 * Must be called with RCU read lock taken.
 *
 * Return: NULL if no page_ext exists for this page.
 */
static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
						   unsigned long pfn)
{
	iter->index = 0;
	iter->start_pfn = pfn;
	iter->page_ext = page_ext_lookup(pfn);

	return iter->page_ext;
}

/**
 * page_ext_iter_next() - Get next page extension
 * @iter: page extension iterator.
 *
 * Must be called with RCU read lock taken.
 *
 * Return: NULL if no next page_ext exists.
 */
static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
{
	unsigned long pfn;

	if (WARN_ON_ONCE(!iter->page_ext))
		return NULL;

	iter->index++;
	pfn = iter->start_pfn + iter->index;

	if (page_ext_iter_next_fast_possible(pfn))
		iter->page_ext = page_ext_next(iter->page_ext);
	else
		iter->page_ext = page_ext_lookup(pfn);

	return iter->page_ext;
}

/**
 * page_ext_iter_get() - Get current page extension
 * @iter: page extension iterator.
 *
 * Return: NULL if no page_ext exists for this iterator.
 */
static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
{
	return iter->page_ext;
}

/**
 * for_each_page_ext(): iterate through page_ext objects.
 * @__page: the page we're interested in
 * @__pgcount: how many pages to iterate through
 * @__page_ext: struct page_ext pointer where the current page_ext
 * object is returned
 * @__iter: struct page_ext_iter object (defined on the stack)
 *
 * IMPORTANT: must be called with RCU read lock taken.
 */
#define for_each_page_ext(__page, __pgcount, __page_ext, __iter)	\
	for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\
	     __page_ext && __iter.index < __pgcount;			\
	     __page_ext = page_ext_iter_next(&__iter))
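
/*
 * Usage sketch: walk the page_ext of @__pgcount pages starting at
 * @__page under the RCU read lock:
 *
 *	struct page_ext_iter iter;
 *	struct page_ext *page_ext;
 *
 *	rcu_read_lock();
 *	for_each_page_ext(page, nr_pages, page_ext, iter) {
 *		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 *	}
 *	rcu_read_unlock();
 */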

#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;

static inline bool early_page_ext_enabled(void)
{
	return false;
}

static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

static inline void page_ext_init(void)
{
}

static inline void page_ext_init_flatmem_late(void)
{
}

static inline void page_ext_init_flatmem(void)
{
}

static inline struct page_ext *page_ext_get(const struct page *page)
{
	return NULL;
}

static inline void page_ext_put(struct page_ext *page_ext)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */