/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only defined for the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
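
/*
 * Illustrative sketch (not part of this header): a node hot-add error path
 * built on the three helpers above. "nid" and the failure handling are
 * hypothetical; the real caller lives in mm/memory_hotplug.c.
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	if (error) {
 *		arch_free_nodedata(pgdat);
 *		arch_refresh_nodedata(nid, NULL);
 *	}
 */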

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation can't be used to get the new node's memory at
 *	this point, because the pgdat for the new node has not been allocated
 *	and initialized yet. Using the new node's own memory will need more work.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);	\
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
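
/*
 * Note (editorial): these values correspond to the strings accepted by
 * mhp_online_type_from_str() below ("offline", "online", "online_kernel",
 * "online_movable"), which is what the memory block sysfs "state" interface
 * feeds in; see drivers/base/memory.c and mm/memory_hotplug.c for the
 * authoritative mapping.
 */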

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))
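
/*
 * Illustrative sketch (hypothetical driver code): with MHP_MERGE_RESOURCE the
 * resource must be treated as consumed on success, matching the comment above.
 *
 *	rc = add_memory_resource(nid, res, MHP_MERGE_RESOURCE);
 *	if (!rc)
 *		res = NULL;	(pointer may be stale after merging)
 */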

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
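
/*
 * Illustrative sketch (hypothetical driver code): callers normally probe for
 * support before requesting a self-contained memmap and fall back to a plain
 * hot-add otherwise.
 *
 *	mhp_t flags = MHP_NONE;
 *
 *	if (mhp_supports_memmap_on_memory(size))
 *		flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory(nid, start, size, flags);
 */
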
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
};
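
/*
 * Illustrative sketch (assumption: values mirror what the generic hot-add
 * path passes down): a minimal mhp_params with kernel page protections and
 * no alternative memmap allocator.
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */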

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock() and
 * zone_span_writelock(). This ensures the size of a zone can't be changed
 * while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
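
/*
 * Illustrative sketch: lockless readers sample the zone span under the
 * seqlock helpers above and retry if a concurrent resize raced with them;
 * resizers hold pgdat_resize_lock() plus zone_span_writelock() as noted in
 * the comment above.
 *
 *	unsigned seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */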
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by the firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
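
/*
 * Illustrative sketch (hypothetical driver): ballooning-style drivers can
 * divert freshly onlined pages to their own handler; pages they do not keep
 * must still be released through generic_online_page(), and the callback is
 * restored on teardown. my_online_page()/my_driver_wants() are placeholders.
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (!my_driver_wants(page))
 *			generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */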

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* Set if the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
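
/*
 * Illustrative sketch: node span updates take the resize lock with IRQs
 * disabled; "flags" receives the saved IRQ state. new_start_pfn/new_nr_pages
 * are placeholders.
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	pgdat->node_start_pfn = new_start_pfn;
 *	pgdat->node_spanned_pages = new_nr_pages;
 *	pgdat_resize_unlock(pgdat, &flags);
 */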
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */