#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <[email protected]>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
	struct list_head list;
	smp_call_func_t func;
	void *info;
	u16 flags;
	u16 priv;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

void __smp_call_function_single(int cpuid, struct call_single_data *data,
				int wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

/*
 * Generic and arch helpers
 */
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
#endif

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

#define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPUs */
#define MSG_ALL			0x8001

#define MSG_INVALIDATE_TLB	0x0001	/* Remote processor TLB invalidate */
#define MSG_STOP_CPU		0x0002	/* Sent to shut down slave CPUs
					 * when rebooting
					 */
#define MSG_RESCHEDULE		0x0003	/* Reschedule request from master CPU */
#define MSG_CALL_FUNCTION	0x0004	/* Call function on all other CPUs */

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))
#define on_each_cpu(func, info, wait)		\
	({					\
		local_irq_disable();		\
		func(info);			\
		local_irq_enable();		\
		0;				\
	})
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()			1
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void init_call_single_data(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

#endif /* !SMP */
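
/*
 * Usage sketch (editor's illustration, not part of the original header;
 * the callback name is hypothetical, <linux/printk.h> is assumed to be
 * available, and CPU 1 is assumed to be online).  The callback runs on
 * the target CPUs with interrupts disabled (from the IPI handler on
 * remote CPUs), so it must not sleep or take sleeping locks:
 *
 *	static void say_hello(void *info)
 *	{
 *		pr_info("hello from CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(1, say_hello, NULL, 1);  // CPU 1, wait for it
 *	smp_call_function(say_hello, NULL, 1);            // all other CPUs
 *	on_each_cpu(say_hello, NULL, 1);                  // every CPU, incl. this one
 */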

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. a smp_processor_id() use that the debugging code reports
 * but which is legal for some reason). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */
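
/*
 * Usage sketch (editor's illustration, not part of the original header;
 * the function name and pr_debug() message are hypothetical).  get_cpu()
 * disables preemption and returns the current CPU number, so the value
 * stays meaningful until the matching put_cpu():
 *
 *	static void note_current_cpu(void)
 *	{
 *		int cpu = get_cpu();	// preemption disabled from here
 *
 *		pr_debug("running on CPU %d\n", cpu);
 *		put_cpu();		// preemption enabled again
 *	}
 *
 * A bare smp_processor_id() is only safe where preemption is already
 * disabled or the task is pinned to one CPU; with CONFIG_DEBUG_PREEMPT
 * other uses trigger a warning.
 */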