xref: /linux-6.15/include/linux/memory_hotplug.h (revision 76426e23)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_MEMORY_HOTPLUG_H
3 #define __LINUX_MEMORY_HOTPLUG_H
4 
5 #include <linux/mmzone.h>
6 #include <linux/spinlock.h>
7 #include <linux/notifier.h>
8 #include <linux/bug.h>
9 
10 struct page;
11 struct zone;
12 struct pglist_data;
13 struct mem_section;
14 struct memory_block;
15 struct resource;
16 struct vmem_altmap;
17 
18 #ifdef CONFIG_MEMORY_HOTPLUG
19 /*
20  * Return the page for a valid pfn only if the page is online. All pfn
21  * walkers that rely on fully initialized page->flags (and other struct
22  * page fields) should use this rather than pfn_valid() && pfn_to_page().
23  */
24 #define pfn_to_online_page(pfn)					   \
25 ({								   \
26 	struct page *___page = NULL;				   \
27 	unsigned long ___pfn = pfn;				   \
28 	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
29 								   \
30 	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
31 	    pfn_valid_within(___pfn))				   \
32 		___page = pfn_to_page(___pfn);			   \
33 	___page;						   \
34 })
35 
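/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * a pfn walker that uses pfn_to_online_page() to skip holes and offline
 * sections, as the comment above recommends. Assumes pfn_to_page() is
 * visible to users of this header, just as the macro above does.
 */
static inline unsigned long example_count_online_pages(unsigned long start_pfn,
							unsigned long nr_pages)
{
	unsigned long pfn, online = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		if (pfn_to_online_page(pfn))
			online++;

	return online;
}
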
36 /*
37  * Types for free bootmem stored in page->lru.next. These have to lie in
38  * an arbitrary range of the unsigned long space for debugging purposes.
39  */
40 enum {
41 	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
42 	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
43 	MIX_SECTION_INFO,
44 	NODE_INFO,
45 	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
46 };
47 
48 /* Types for controlling the zone type of onlined and offlined memory */
49 enum {
50 	/* Offline the memory. */
51 	MMOP_OFFLINE = 0,
52 	/* Online the memory. The target zone depends; see default_zone_for_pfn(). */
53 	MMOP_ONLINE,
54 	/* Online the memory to ZONE_NORMAL. */
55 	MMOP_ONLINE_KERNEL,
56 	/* Online the memory to ZONE_MOVABLE. */
57 	MMOP_ONLINE_MOVABLE,
58 };
59 
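/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * mapping the MMOP_* online types to the names conventionally accepted by the
 * memory block "state" sysfs attribute (an assumption, not taken from this
 * header).
 */
static inline const char *example_online_type_name(int online_type)
{
	switch (online_type) {
	case MMOP_OFFLINE:
		return "offline";
	case MMOP_ONLINE:
		return "online";
	case MMOP_ONLINE_KERNEL:
		return "online_kernel";
	case MMOP_ONLINE_MOVABLE:
		return "online_movable";
	default:
		return "invalid";
	}
}
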
60 /*
61  * Restrictions for the memory hotplug:
62  * flags:  MHP_ flags
63  * altmap: alternative allocator for memmap array
64  */
65 struct mhp_restrictions {
66 	unsigned long flags;
67 	struct vmem_altmap *altmap;
68 };
69 
70 /*
71  * Zone resizing functions
72  *
73  * Note: any attempt to resize a zone should hold both pgdat_resize_lock()
74  * and zone_span_writelock(). This ensures the size of a zone can't be
75  * changed while pgdat_resize_lock() is held.
76  */
77 static inline unsigned zone_span_seqbegin(struct zone *zone)
78 {
79 	return read_seqbegin(&zone->span_seqlock);
80 }
81 static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
82 {
83 	return read_seqretry(&zone->span_seqlock, iv);
84 }
85 static inline void zone_span_writelock(struct zone *zone)
86 {
87 	write_seqlock(&zone->span_seqlock);
88 }
89 static inline void zone_span_writeunlock(struct zone *zone)
90 {
91 	write_sequnlock(&zone->span_seqlock);
92 }
93 static inline void zone_seqlock_init(struct zone *zone)
94 {
95 	seqlock_init(&zone->span_seqlock);
96 }
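
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the reader side of the zone span seqlock, retrying the read if a concurrent
 * resize changed the span in the meantime.
 */
static inline unsigned long example_read_zone_spanned_pages(struct zone *zone)
{
	unsigned long spanned;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		spanned = zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));

	return spanned;
}
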
97 extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
98 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
99 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
100 /* VM interfaces that may be used by a firmware interface */
101 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
102 			int online_type, int nid);
103 extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
104 					 unsigned long end_pfn);
105 extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
106 						unsigned long end_pfn);
107 
108 typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
109 
110 extern void generic_online_page(struct page *page, unsigned int order);
111 extern int set_online_page_callback(online_page_callback_t callback);
112 extern int restore_online_page_callback(online_page_callback_t callback);
113 
114 extern int try_online_node(int nid);
115 
116 extern int arch_add_memory(int nid, u64 start, u64 size,
117 			struct mhp_restrictions *restrictions);
118 extern u64 max_mem_size;
119 
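/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * calling arch_add_memory() with an empty mhp_restrictions, i.e. no MHP_
 * flags and no altmap, as a caller without special requirements might do.
 */
static inline int example_arch_add_memory_simple(int nid, u64 start, u64 size)
{
	struct mhp_restrictions restrictions = {};

	return arch_add_memory(nid, start, size, &restrictions);
}
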
120 extern int memhp_online_type_from_str(const char *str);
121 
122 /* Default online_type (MMOP_*) when new memory blocks are added. */
123 extern int memhp_default_online_type;
124 /* If movable_node boot option specified */
125 extern bool movable_node_enabled;
126 static inline bool movable_node_is_enabled(void)
127 {
128 	return movable_node_enabled;
129 }
130 
131 extern void arch_remove_memory(int nid, u64 start, u64 size,
132 			       struct vmem_altmap *altmap);
133 extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
134 			   struct vmem_altmap *altmap);
135 
136 /* reasonably generic interface to expand the physical pages */
137 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
138 		       struct mhp_restrictions *restrictions);
139 
140 #ifndef CONFIG_ARCH_HAS_ADD_PAGES
141 static inline int add_pages(int nid, unsigned long start_pfn,
142 		unsigned long nr_pages, struct mhp_restrictions *restrictions)
143 {
144 	return __add_pages(nid, start_pfn, nr_pages, restrictions);
145 }
146 #else /* ARCH_HAS_ADD_PAGES */
147 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
148 	      struct mhp_restrictions *restrictions);
149 #endif /* ARCH_HAS_ADD_PAGES */
150 
151 #ifdef CONFIG_NUMA
152 extern int memory_add_physaddr_to_nid(u64 start);
153 #else
154 static inline int memory_add_physaddr_to_nid(u64 start)
155 {
156 	return 0;
157 }
158 #endif
159 
160 #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
161 /*
162  * To support node hot-add, a new pgdat has to be allocated.
163  *
164  * If an arch has a generic-style NODE_DATA(),
165  * node_data[nid] = kzalloc() works well, but that depends on the architecture.
166  *
167  * In general, generic_alloc_nodedata() is used.
168  * arch_free_nodedata() is only defined for the error path of node hot-add.
169  *
170  */
171 extern pg_data_t *arch_alloc_nodedata(int nid);
172 extern void arch_free_nodedata(pg_data_t *pgdat);
173 extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
174 
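/*
 * Illustrative sketch (hypothetical helper, not the kernel's node hot-add
 * path): allocate the pgdat for a new node and publish it via
 * arch_refresh_nodedata(); on a later hot-add failure the pgdat would be
 * released again with arch_free_nodedata().
 */
static inline pg_data_t *example_alloc_new_pgdat(int nid)
{
	pg_data_t *pgdat = arch_alloc_nodedata(nid);

	if (pgdat)
		arch_refresh_nodedata(nid, pgdat);

	return pgdat;
}
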
175 #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
176 
177 #define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
178 #define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)
179 
180 #ifdef CONFIG_NUMA
181 /*
182  * If CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=n, this function is used to allocate the pgdat.
183  * XXX: kmalloc_node() cannot allocate from the new node's memory at this point,
184  *	because the pgdat for the new node has not been allocated/initialized yet.
185  *	Using the new node's own memory would need further work.
186  */
187 #define generic_alloc_nodedata(nid)				\
188 ({								\
189 	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
190 })
191 /*
192  * This definition is just for the error path of node hot-add.
193  * For node hot-remove, it would have to be replaced.
194  */
195 #define generic_free_nodedata(pgdat)	kfree(pgdat)
196 
197 extern pg_data_t *node_data[];
198 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
199 {
200 	node_data[nid] = pgdat;
201 }
202 
203 #else /* !CONFIG_NUMA */
204 
205 /* never called */
206 static inline pg_data_t *generic_alloc_nodedata(int nid)
207 {
208 	BUG();
209 	return NULL;
210 }
211 static inline void generic_free_nodedata(pg_data_t *pgdat)
212 {
213 }
214 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
215 {
216 }
217 #endif /* CONFIG_NUMA */
218 #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
219 
220 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
221 extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
222 #else
223 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
224 {
225 }
226 #endif
227 extern void put_page_bootmem(struct page *page);
228 extern void get_page_bootmem(unsigned long info, struct page *page,
229 			     unsigned long type);
230 
231 void get_online_mems(void);
232 void put_online_mems(void);
233 
234 void mem_hotplug_begin(void);
235 void mem_hotplug_done(void);
236 
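/*
 * Illustrative sketch building on the hypothetical walker near
 * pfn_to_online_page() above: hold the hotplug reader lock so that no
 * sections can be onlined or offlined while the range is walked.
 */
static inline unsigned long example_count_online_pages_stable(unsigned long start_pfn,
							      unsigned long nr_pages)
{
	unsigned long online;

	get_online_mems();
	online = example_count_online_pages(start_pfn, nr_pages);
	put_online_mems();

	return online;
}
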
237 #else /* ! CONFIG_MEMORY_HOTPLUG */
238 #define pfn_to_online_page(pfn)			\
239 ({						\
240 	struct page *___page = NULL;		\
241 	if (pfn_valid(pfn))			\
242 		___page = pfn_to_page(pfn);	\
243 	___page;				\
244  })
245 
246 static inline unsigned zone_span_seqbegin(struct zone *zone)
247 {
248 	return 0;
249 }
250 static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
251 {
252 	return 0;
253 }
254 static inline void zone_span_writelock(struct zone *zone) {}
255 static inline void zone_span_writeunlock(struct zone *zone) {}
256 static inline void zone_seqlock_init(struct zone *zone) {}
257 
258 static inline int mhp_notimplemented(const char *func)
259 {
260 	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
261 	dump_stack();
262 	return -ENOSYS;
263 }
264 
265 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
266 {
267 }
268 
269 static inline int try_online_node(int nid)
270 {
271 	return 0;
272 }
273 
274 static inline void get_online_mems(void) {}
275 static inline void put_online_mems(void) {}
276 
277 static inline void mem_hotplug_begin(void) {}
278 static inline void mem_hotplug_done(void) {}
279 
280 static inline bool movable_node_is_enabled(void)
281 {
282 	return false;
283 }
284 #endif /* ! CONFIG_MEMORY_HOTPLUG */
285 
286 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
287 /*
288  * pgdat resizing functions
289  */
290 static inline
291 void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
292 {
293 	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
294 }
295 static inline
296 void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
297 {
298 	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
299 }
300 static inline
301 void pgdat_resize_init(struct pglist_data *pgdat)
302 {
303 	spin_lock_init(&pgdat->node_size_lock);
304 }
305 #else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
306 /*
307  * Stub functions for when memory hotplug and deferred struct page init are off
308  */
309 static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
310 static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
311 static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
312 #endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
313 
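/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * updating a node's spanned range under the pgdat resize lock declared above.
 */
static inline void example_update_node_span(struct pglist_data *pgdat,
					    unsigned long start_pfn,
					    unsigned long nr_pages)
{
	unsigned long flags;

	pgdat_resize_lock(pgdat, &flags);
	pgdat->node_start_pfn = start_pfn;
	pgdat->node_spanned_pages = nr_pages;
	pgdat_resize_unlock(pgdat, &flags);
}
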
314 #ifdef CONFIG_MEMORY_HOTREMOVE
315 
316 extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
317 extern void try_offline_node(int nid);
318 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
319 extern int remove_memory(int nid, u64 start, u64 size);
320 extern void __remove_memory(int nid, u64 start, u64 size);
321 
322 #else
323 static inline bool is_mem_section_removable(unsigned long pfn,
324 					unsigned long nr_pages)
325 {
326 	return false;
327 }
328 
329 static inline void try_offline_node(int nid) {}
330 
331 static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
332 {
333 	return -EINVAL;
334 }
335 
336 static inline int remove_memory(int nid, u64 start, u64 size)
337 {
338 	return -EBUSY;
339 }
340 
341 static inline void __remove_memory(int nid, u64 start, u64 size) {}
342 #endif /* CONFIG_MEMORY_HOTREMOVE */
343 
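/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the usual hot-remove sequence of offlining a range first and only then
 * removing it. Assumes PAGE_SHIFT is visible to users of this header.
 */
static inline int example_offline_and_remove(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = offline_pages(start_pfn, nr_pages);
	if (rc)
		return rc;

	return remove_memory(nid, start, size);
}
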
344 extern void set_zone_contiguous(struct zone *zone);
345 extern void clear_zone_contiguous(struct zone *zone);
346 
347 extern void __ref free_area_init_core_hotplug(int nid);
348 extern int __add_memory(int nid, u64 start, u64 size);
349 extern int add_memory(int nid, u64 start, u64 size);
350 extern int add_memory_resource(int nid, struct resource *resource);
351 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
352 		unsigned long nr_pages, struct vmem_altmap *altmap);
353 extern void remove_pfn_range_from_zone(struct zone *zone,
354 				       unsigned long start_pfn,
355 				       unsigned long nr_pages);
356 extern bool is_memblock_offlined(struct memory_block *mem);
357 extern int sparse_add_section(int nid, unsigned long pfn,
358 		unsigned long nr_pages, struct vmem_altmap *altmap);
359 extern void sparse_remove_section(struct mem_section *ms,
360 		unsigned long pfn, unsigned long nr_pages,
361 		unsigned long map_offset, struct vmem_altmap *altmap);
362 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
363 					  unsigned long pnum);
364 extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
365 		int online_type);
366 extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
367 		unsigned long nr_pages);
368 #endif /* __LINUX_MEMORY_HOTPLUG_H */
369