/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__section(".discard") __attribute__((unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition puts the following two extra restrictions on
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set the weak
 * definition is used for all cases.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that a hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
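/*
 * Illustrative sketch (not compiled) of the scheme below:
 * DEFINE_PER_CPU(int, foo) expands roughly to
 *
 *	__PCPU_DUMMY_ATTRS char __pcpu_scope_foo;
 *	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_foo;
 *	__PCPU_DUMMY_ATTRS char __pcpu_unique_foo;
 *	extern __PCPU_ATTRS("") __typeof__(int) foo;
 *	__PCPU_ATTRS("") __weak __typeof__(int) foo;
 *
 * so "static DEFINE_PER_CPU(int, foo)" makes __pcpu_scope_foo static, and a
 * DECLARE_PER_CPU(int, foo) visible in the same translation unit (which
 * declares __pcpu_scope_foo extern) then fails the build, catching a
 * mismatch between the declared and the defined scope.
 */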
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) __typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
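/*
 * Minimal usage sketch for the statistics pattern described above (names
 * are illustrative only, not part of this API): each CPU updates only its
 * own instance, and a reader sums all instances.
 *
 *	DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, pcpu_pkt_count);
 *
 *	static unsigned long total_packets(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu(pcpu_pkt_count, cpu);
 *		return sum;
 *	}
 */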
/*
 * Declaration/definition used for per-CPU variables that should be accessed
 * as decrypted when memory encryption is enabled in the guest.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define DECLARE_PER_CPU_DECRYPTED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..decrypted")

#define DEFINE_PER_CPU_DECRYPTED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
#else
#define DEFINE_PER_CPU_DECRYPTED(type, name)	DEFINE_PER_CPU(type, name)
#endif

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about the
 * address space across EXPORT_SYMBOL(), so make EXPORT_SYMBOL() a no-op
 * when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies that @ptr is a percpu pointer without
 * evaluating @ptr and is invoked once before a percpu area is accessed by
 * all accessors and operations.  This is performed in the generic part of
 * percpu and arch overrides don't need to worry about it; however, if an
 * arch wants to implement an arch-specific percpu accessor or operation,
 * it may use __verify_pcpu_ptr() to verify the parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define PERCPU_PTR(__p)							\
({									\
	unsigned long __pcpu_ptr = (__force unsigned long)(__p);	\
	(typeof(*(__p)) __force __kernel *)(__pcpu_ptr);		\
})

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer.  Use RELOC_HIDE() to prevent the compiler
 * from making incorrect assumptions about the pointer value.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE(PERCPU_PTR(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu)						\
({									\
	(void)(cpu);							\
	__verify_pcpu_ptr(ptr);						\
	PERCPU_PTR(ptr);						\
})

#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))
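/*
 * Usage sketch (illustrative only; "pcpu_drop_count" is a made-up name).
 * get_cpu_var() disables preemption and yields this CPU's instance as an
 * lvalue; put_cpu_var(), defined below, re-enables preemption:
 *
 *	DEFINE_PER_CPU(int, pcpu_drop_count);
 *
 *	get_cpu_var(pcpu_drop_count)++;
 *	put_cpu_var(pcpu_drop_count);
 *
 * get_cpu_ptr()/put_cpu_ptr() below are the equivalent pair for code that
 * already holds a percpu pointer rather than a percpu variable.
 */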
/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call_return2bool(stem, variable, ...)		\
({									\
	bool pscr2_ret__;						\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
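/*
 * For example (illustrative expansion, not compiled): with a 4-byte percpu
 * variable "pcp", __pcpu_size_call_return(raw_cpu_read_, pcp) selects the
 * case-4 arm and evaluates raw_cpu_read_4(pcp), which is either an
 * arch-provided primitive or, typically, the generic fallback from
 * asm-generic/percpu.h.  An unsupported size ends up in
 * __bad_size_call_parameter(), which has no definition and thus fails the
 * link.
 */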
/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <[email protected]>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes.  E.g. provide this_cpu_add_2() to provide per
 * cpu atomic operations for 2 byte sized RMW actions.  If the arch code
 * does not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
 * interrupts then one of these RMW operations can show unexpected behavior
 * because the execution thread was rescheduled on another processor or an
 * interrupt occurred and the same percpu variable was modified from the
 * interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
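/*
 * Minimal usage sketch (illustrative; "pcpu_nr_events" is a made-up name):
 * raw_cpu_*() is appropriate when the caller already provides the needed
 * exclusion, e.g. inside an existing irq-disabled or preempt-disabled
 * section.
 *
 *	DEFINE_PER_CPU(unsigned long, pcpu_nr_events);
 *
 *	local_irq_save(flags);
 *	raw_cpu_inc(pcpu_nr_events);
 *	local_irq_restore(flags);
 *
 * Without such protection, prefer this_cpu_inc() below, which carries its
 * own preemption/interrupt protection.
 */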
/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_try_cmpxchg(pcp, ovalp, nval)			\
({									\
	__this_cpu_preempt_check("try_cmpxchg");			\
	raw_cpu_try_cmpxchg(pcp, ovalp, nval);				\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

/*
 * Operations with implied preemption/interrupt protection.  These
 * operations can be used without worrying about preemption or interrupts.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)

#endif	/* __ASSEMBLY__ */
#endif	/* _LINUX_PERCPU_DEFS_H */