#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <[email protected]>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/llist.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
	union {
		struct list_head list;
		struct llist_node llist;
	};
	smp_call_func_t func;
	void *info;
	u16 flags;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags);

void __smp_call_function_single(int cpuid, struct call_single_data *data,
				int wait);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handling INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt
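/*
 * Illustrative usage sketch (not part of this header): how a caller might
 * use the cross-call interfaces declared above.  The names ipi_count,
 * bump_counter and example_cross_call are hypothetical and exist only for
 * this example.  The called function runs in interrupt context on the
 * target CPU(s), so it must not sleep; with a non-zero wait argument the
 * caller blocks until the function has completed everywhere.
 *
 *	static DEFINE_PER_CPU(unsigned long, ipi_count);
 *
 *	static void bump_counter(void *info)
 *	{
 *		this_cpu_inc(ipi_count);
 *	}
 *
 *	static void example_cross_call(int target_cpu)
 *	{
 *		// run on one specific CPU and wait for completion
 *		smp_call_function_single(target_cpu, bump_counter, NULL, 1);
 *
 *		// run on every online CPU, including the local one
 *		on_each_cpu(bump_counter, NULL, 1);
 *	}
 */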
/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. uses of smp_processor_id() that the debugging code reports
 * but which are legal for some other reason). Don't use this to
 * hack around the warning message, as your code might not work
 * under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */
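/*
 * Illustrative usage sketch (not part of this header): the preemption-safe
 * pattern described in the smp_processor_id() comment above.  The function
 * name example_use_cpu_id is hypothetical.  get_cpu() disables preemption,
 * so the returned CPU number stays valid until the matching put_cpu();
 * calling smp_processor_id() in preemptible context would trigger the
 * DEBUG_PREEMPT check instead.
 *
 *	static void example_use_cpu_id(void)
 *	{
 *		int cpu;
 *
 *		cpu = get_cpu();	// preemption disabled from here on
 *		pr_info("running on CPU %d\n", cpu);
 *		put_cpu();		// re-enables preemption
 *	}
 */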