#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

/*
 * Map the per-CPU variable name used in the kernel sources onto the
 * mangled symbol name that actually lives in the per-CPU section.
 */
#define per_cpu_var(var)	per_cpu__##var

/*
 * Attribute sets used by the base declaration/definition macros below.
 *
 * __PCPU_ATTRS() places a variable into the per-CPU base section plus an
 * optional sub-section suffix 'sec', which callers use to influence the
 * variable's placement and storage parameters.
 *
 * NOTE! A DECLARE and its matching DEFINE must name the same section,
 * otherwise the compiler may emit the wrong access code for the symbol
 * and linkage errors will result.
 */
#define __PCPU_ATTRS(sec)						\
	__attribute__((section(PER_CPU_BASE_SECTION sec)))		\
	PER_CPU_ATTRIBUTES

/* Attributes for the dummy enforcement symbols used in the weak case. */
#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))

/*
 * Modules on s390 and alpha need their per-CPU variables defined as
 * weak so the compiler generates GOT-based external references for
 * them: the per-CPU sections may be located outside the normally
 * addressable area on those architectures.
 *
 * Weak definitions impose two extra restrictions on per-CPU variables:
 *
 * 1. The symbol must be globally unique, even for static ones.
 * 2. Static per-CPU variables cannot be defined inside a function.
 *
 * An architecture needing weak per-CPU definitions defines
 * ARCH_NEEDS_WEAK_PER_CPU in its asm/percpu.h.
 *
 * To make sure generic code honors both restrictions, the weak
 * definitions are used everywhere when CONFIG_DEBUG_FORCE_WEAK_PER_CPU
 * is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * The __pcpu_scope_* dummy enforces scope: DEFINE_PER_CPU() with a
 * 'static' in front makes it a static symbol, so a later
 * DECLARE_PER_CPU() (which declares it extern) for the same variable
 * triggers a build failure.
 *
 * The __pcpu_unique_* dummy enforces symbol uniqueness, so that an
 * otherwise hidden weak-symbol collision - which would silently make
 * unrelated variables share one address - is caught at build time.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) per_cpu__##name
#else
/*
 * Plain declaration and definition macros for the common case.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) per_cpu__##name
#endif

/*
 * Ordinary per-CPU variables: placed in the base per-CPU section with
 * no sub-section suffix.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Per-CPU variables that must come first in the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Per-CPU variables that must be cacheline aligned under SMP: each
 * instance belongs to one particular CPU, and keeping the data from
 * unnecessarily spanning cachelines avoids the inefficiency of other
 * CPUs touching the same line.
 *
 * A typical example is statistical data, where each CPU updates only
 * its own set but one CPU collates all of them when servicing a read
 * from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Intermodule exports for per-CPU variables: export the mangled
 * per_cpu__ symbol, not the source-level name.
 */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

#endif /* _LINUX_PERCPU_DEFS_H */