/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types controlling the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
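
/*
 * These correspond to the "state" strings accepted by the memory sysfs
 * interface; e.g. (illustrative):
 *
 *	echo online_movable > /sys/devices/system/memory/memoryX/state
 *
 * requests MMOP_ONLINE_MOVABLE for that memory block.
 */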

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))
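/*
 * An illustrative sketch, not a definitive call sequence: once a call with
 * MHP_MERGE_RESOURCE succeeds, the caller must drop its resource pointer,
 * as the resource may have been merged and freed:
 *
 *	rc = add_memory_resource(nid, res, MHP_MERGE_RESOURCE);
 *	if (!rc)
 *		res = NULL;
 */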

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
};
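
/*
 * A minimal initialization sketch; PAGE_KERNEL is shown only as an assumed
 * protection, core mm derives the actual value:
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */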

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
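/*
 * Read-side usage follows the usual seqlock retry pattern; a sketch of a
 * reader sampling the zone span:
 *
 *	unsigned int seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */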
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct page *page, long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
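/*
 * A driver intercepting memory onlining (e.g. a balloon driver) can swap
 * the callback in and out; my_online_page() below is a hypothetical
 * example, and a real callback typically ends in generic_online_page():
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		...
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */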

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but this depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * For now, arch_free_nodedata() is only defined for the error path of node
 * hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
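/*
 * A sketch of the expected hot-add sequence, based on the comment above;
 * arch_free_nodedata() is only used on the error path, and on success the
 * new pgdat is published via arch_refresh_nodedata():
 *
 *	pgdat = arch_alloc_nodedata(nid);
 *	if (!pgdat)
 *		return -ENOMEM;
 *	...
 *	if (rc) {
 *		arch_free_nodedata(pgdat);
 *		return rc;
 *	}
 *	arch_refresh_nodedata(nid, pgdat);
 */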

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the
 * pgdat.
 * XXX: kmalloc_node() cannot allocate from the new node's memory at this
 *	point, because the new node's pgdat has not itself been
 *	allocated/initialized yet. Using the new node's memory will require
 *	more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, it would have to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
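/*
 * get_online_mems()/put_online_mems() form the read side that keeps
 * memory from being on/offlined in the critical section, while
 * mem_hotplug_begin()/mem_hotplug_done() bracket the hotplug operation
 * itself. A minimal read-side sketch:
 *
 *	get_online_mems();
 *	...
 *	put_online_mems();
 */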

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
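/*
 * pgdat_resize_lock() saves the IRQ state through its second argument, so
 * callers pass the address of a local flags word (a minimal sketch):
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	...
 *	pgdat_resize_unlock(pgdat, &flags);
 */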
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
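/*
 * Illustrative sketch of a driver exposing device memory as driver-managed
 * System RAM; the resource name here is only an assumed example:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (kmem)", MHP_NONE);
 */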
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */