1 #ifndef __LINUX_PERCPU_H 2 #define __LINUX_PERCPU_H 3 4 #include <linux/spinlock.h> /* For preempt_disable() */ 5 #include <linux/slab.h> /* For kmalloc() */ 6 #include <linux/smp.h> 7 #include <linux/string.h> /* For memset() */ 8 #include <linux/cpumask.h> 9 10 #include <asm/percpu.h> 11 12 /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ 13 #ifndef PERCPU_ENOUGH_ROOM 14 #define PERCPU_ENOUGH_ROOM 32768 15 #endif 16 17 /* 18 * Must be an lvalue. Since @var must be a simple identifier, 19 * we force a syntax error here if it isn't. 20 */ 21 #define get_cpu_var(var) (*({ \ 22 extern int simple_identifier_##var(void); \ 23 preempt_disable(); \ 24 &__get_cpu_var(var); })) 25 #define put_cpu_var(var) preempt_enable() 26 27 #ifdef CONFIG_SMP 28 29 struct percpu_data { 30 void *ptrs[NR_CPUS]; 31 }; 32 33 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) 34 /* 35 * Use this to get to a cpu's version of the per-cpu object dynamically 36 * allocated. Non-atomic access to the current CPU's version should 37 * probably be combined with get_cpu()/put_cpu(). 
 */
#define percpu_ptr(ptr, cpu) \
({ \
	struct percpu_data *__p = __percpu_disguise(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)]; \
})

/* Allocate (populate) or free (depopulate) one cpu's copy of *__pdata. */
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
extern void percpu_depopulate(void *__pdata, int cpu);
/* Same, for every cpu set in *mask; __percpu_populate_mask returns 0 on success. */
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask);
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);

#else /* CONFIG_SMP */

/* UP: a single object serves every "cpu"; evaluate cpu for side effects only. */
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

/* UP stubs: there are no per-cpu copies to create or tear down. */
static inline void percpu_depopulate(void *__pdata, int cpu)
{
}

static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
}

static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
				    int cpu)
{
	/* The object itself is the (only) per-cpu copy. */
	return percpu_ptr(__pdata, cpu);
}

static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
					 cpumask_t *mask)
{
	/* Nothing to allocate on UP; report success. */
	return 0;
}

static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	/* UP: one zeroed object stands in for all per-cpu copies. */
	return kzalloc(size, gfp);
}

static inline void percpu_free(void *__pdata)
{
	kfree(__pdata);
}

#endif /* CONFIG_SMP */

/* Convenience wrappers taking the cpumask by value rather than by pointer. */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

/* Allocate copies for the CPUs currently online. */
#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */

#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
					       cpu_possible_map)
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))

/* Legacy forwarders matching the alloc_percpu()/per_cpu_ptr() naming. */
#define free_percpu(ptr) percpu_free((ptr))
#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))

#endif /* __LINUX_PERCPU_H */