/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers that rely on fully initialized page->flags (and other struct
 * page state) should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					   \
({								   \
	struct page *___page = NULL;				   \
	unsigned long ___pfn = pfn;				   \
	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
								   \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))				   \
		___page = pfn_to_page(___pfn);			   \
	___page;						   \
})
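
/*
 * Example (illustrative sketch, not part of this header): a pfn walker
 * that only touches online pages. Offline or invalid pfns yield NULL and
 * are skipped; start_pfn and end_pfn are assumed to come from the caller.
 *
 *	unsigned long pfn;
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... page->flags is fully initialized here ...
 *	}
 */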

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
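
/*
 * Illustrative sketch (not part of this header): onlining a pfn range with
 * an explicit zone policy. "nid", "pfn" and "nr_pages" are assumed to come
 * from the caller; MMOP_ONLINE_MOVABLE requests ZONE_MOVABLE.
 *
 *	if (allow_online_pfn_range(nid, pfn, nr_pages, MMOP_ONLINE_MOVABLE))
 *		ret = online_pages(pfn, nr_pages, MMOP_ONLINE_MOVABLE);
 */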

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
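
/*
 * Illustrative sketch (not part of this header): reader and writer sides
 * of the zone span seqlock, following the note above. "zone" and the
 * irq "flags" word are assumed to be provided by the caller.
 *
 * Reader (tolerates a concurrent resize and retries):
 *
 *	unsigned long start_pfn, nr_pages;
 *	unsigned seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 * Writer (resizing the zone span):
 *
 *	pgdat_resize_lock(zone->zone_pgdat, &flags);
 *	zone_span_writelock(zone);
 *	... update zone->zone_start_pfn / zone->spanned_pages ...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 */
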
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int arch_remove_memory(int nid, u64 start, u64 size,
				struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */
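
/*
 * Illustrative sketch (not part of this header): a minimal arch_add_memory()
 * built on add_pages(), assuming the architecture needs no extra mapping
 * setup. Real implementations typically create kernel mappings first.
 *
 *	int arch_add_memory(int nid, u64 start, u64 size,
 *			    struct vmem_altmap *altmap, bool want_memblock)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 *	}
 */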

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but this depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
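
/*
 * Illustrative sketch (not part of this header): how a node hot-add path
 * might use these hooks, including the error path that arch_free_nodedata()
 * exists for. "nid" and the error condition are assumed to come from the
 * caller; the surrounding function is hypothetical.
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	if (error) {
 *		arch_refresh_nodedata(nid, NULL);
 *		arch_free_nodedata(pgdat);
 *	}
 */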

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the pgdat.
 * XXX: kmalloc_node() cannot be used to get the new node's memory at this time,
 *	because the pgdat for the new node is itself not allocated/initialized yet.
 *	Using the new node's memory will need more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is only used on the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
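
/*
 * Illustrative sketch (not part of this header): pgdat_resize_lock() is
 * irqsave-style and stores the irq flags through the pointer argument, so
 * the caller declares the flags word itself and passes its address to both
 * calls. "pgdat" is assumed to be provided by the caller.
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	... update pgdat->node_start_pfn / pgdat->node_spanned_pages ...
 *	pgdat_resize_unlock(pgdat, &flags);
 */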

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(int nid, unsigned long start_pfn,
				  struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */