#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * pgdat resizing functions.  node_size_lock protects node_start_pfn,
 * node_present_pages and node_spanned_pages against a concurrent resize.
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions.  The span seqlock lets lockless readers of
 * zone_start_pfn/spanned_pages detect a racing resize and retry.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* Archs that don't support memory hotplug need stub definitions for this. */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interfaces */
extern int add_memory(u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int online_pages(unsigned long, unsigned long);

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages)
{
	return mhp_notimplemented(__FUNCTION__);
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages)
{
	printk(KERN_WARNING "%s() called, not yet supported\n", __FUNCTION__);
	dump_stack();
	return -ENOSYS;
}

#endif /* __LINUX_MEMORY_HOTPLUG_H */
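
/*
 * Illustrative sketch, not part of the header above: one way a firmware or
 * platform driver might drive the interface declared here, first registering
 * a new physical range with add_memory() and then handing its page frames to
 * the allocator with online_pages().  The function name example_hotadd_ram()
 * is hypothetical, the range is assumed to be page aligned, and real callers
 * also have to choose the node and cope with partial failure.
 */
#include <linux/memory_hotplug.h>
#include <asm/page.h>

static int example_hotadd_ram(u64 start, u64 size)
{
	int ret;

	/* Create struct pages and grow the node/zone spans for the range. */
	ret = add_memory(start, size);
	if (ret)
		return ret;

	/* Make the new page frames visible to the page allocator. */
	return online_pages(start >> PAGE_SHIFT, size >> PAGE_SHIFT);
}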