/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on a fully initialized page->flags (and friends)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					\
({								\
	struct page *___page = NULL;				\
	unsigned long ___nr = pfn_to_section_nr(pfn);		\
								\
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr)) \
		___page = pfn_to_page(pfn);			\
	___page;						\
})

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};

/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone cannot be
 * changed while pgdat_resize_lock() is held.
 */
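/*
 * Illustrative sketch (not taken from any specific in-tree caller): a
 * reader can sample a zone's span consistently against concurrent
 * resizes by retrying on the span seqlock:
 *
 *	unsigned seq;
 *	unsigned long start_pfn, spanned;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		spanned = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */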
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			int online_type);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
			unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* If the movable_node boot option is specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size,
		struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of
 * node hot-add.
 */
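/*
 * Illustrative sketch of how these hooks pair up during node hot-add
 * (the exact mm/memory_hotplug.c flow differs; "init_failed" below is
 * a hypothetical condition for illustration):
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return NULL;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	if (init_failed)
 *		arch_free_nodedata(pgdat);	(error path only)
 */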
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot be used to allocate from the new node's
 *	own memory at this point, because the pgdat for the new node has
 *	not been allocated/initialized yet. Using the new node's memory
 *	will require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path of node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

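/*
 * Illustrative sketch (not a specific in-tree walker): pfn walkers pin
 * the hotplug state around the walk so sections cannot be offlined
 * under them, and use pfn_to_online_page() to skip offline ranges:
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		...
 *	}
 *	put_online_mems();
 */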
#else /* !CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* !CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn,
		unsigned long nr_pages, int online_type);
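/*
 * Illustrative sketch of the walk_memory_range() callback contract
 * ("count_blocks_cb" and "nr" are hypothetical names): func is invoked
 * once per present memory_block in [start_pfn, end_pfn); a nonzero
 * return value stops the walk and is propagated to the caller:
 *
 *	static int count_blocks_cb(struct memory_block *mem, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *	...
 *	int nr = 0;
 *	walk_memory_range(start_pfn, end_pfn, &nr, count_blocks_cb);
 */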
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */