1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef __LINUX_COMPILER_TYPES_H 3 #define __LINUX_COMPILER_TYPES_H 4 5 /* 6 * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. 7 * In the meantime, to support gcc < 10, we implement __has_builtin 8 * by hand. 9 */ 10 #ifndef __has_builtin 11 #define __has_builtin(x) (0) 12 #endif 13 14 #ifndef __ASSEMBLY__ 15 16 /* 17 * Skipped when running bindgen due to a libclang issue; 18 * see https://github.com/rust-lang/rust-bindgen/issues/2244. 19 */ 20 #if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ 21 __has_attribute(btf_type_tag) && !defined(__BINDGEN__) 22 # define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value))) 23 #else 24 # define BTF_TYPE_TAG(value) /* nothing */ 25 #endif 26 27 /* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ 28 #ifdef __CHECKER__ 29 /* address spaces */ 30 # define __kernel __attribute__((address_space(0))) 31 # define __user __attribute__((noderef, address_space(__user))) 32 # define __iomem __attribute__((noderef, address_space(__iomem))) 33 # define __percpu __attribute__((noderef, address_space(__percpu))) 34 # define __rcu __attribute__((noderef, address_space(__rcu))) 35 static inline void __chk_user_ptr(const volatile void __user *ptr) { } 36 static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } 37 /* context/locking */ 38 # define __must_hold(x) __attribute__((context(x,1,1))) 39 # define __acquires(x) __attribute__((context(x,0,1))) 40 # define __cond_acquires(x) __attribute__((context(x,0,-1))) 41 # define __releases(x) __attribute__((context(x,1,0))) 42 # define __acquire(x) __context__(x,1) 43 # define __release(x) __context__(x,-1) 44 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 45 /* other */ 46 # define __force __attribute__((force)) 47 # define __nocast __attribute__((nocast)) 48 # define __safe __attribute__((safe)) 49 # define __private __attribute__((noderef)) 50 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) 51 #else /* __CHECKER__ */ 52 /* address spaces */ 53 # define __kernel 54 # ifdef STRUCTLEAK_PLUGIN 55 # define __user __attribute__((user)) 56 # else 57 # define __user BTF_TYPE_TAG(user) 58 # endif 59 # define __iomem 60 # define __percpu BTF_TYPE_TAG(percpu) 61 # define __rcu BTF_TYPE_TAG(rcu) 62 63 # define __chk_user_ptr(x) (void)0 64 # define __chk_io_ptr(x) (void)0 65 /* context/locking */ 66 # define __must_hold(x) 67 # define __acquires(x) 68 # define __cond_acquires(x) 69 # define __releases(x) 70 # define __acquire(x) (void)0 71 # define __release(x) (void)0 72 # define __cond_lock(x,c) (c) 73 /* other */ 74 # define __force 75 # define __nocast 76 # define __safe 77 # define __private 78 # define ACCESS_PRIVATE(p, member) ((p)->member) 79 # define __builtin_warning(x, y...) (1) 80 #endif /* __CHECKER__ */ 81 82 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ 83 #define ___PASTE(a,b) a##b 84 #define __PASTE(a,b) ___PASTE(a,b) 85 86 #ifdef __KERNEL__ 87 88 /* Attributes */ 89 #include <linux/compiler_attributes.h> 90 91 #if CONFIG_FUNCTION_ALIGNMENT > 0 92 #define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT) 93 #else 94 #define __function_aligned 95 #endif 96 97 /* 98 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute 99 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute 100 * 101 * When -falign-functions=N is in use, we must avoid the cold attribute as 102 * GCC drops the alignment for cold functions. Worse, GCC can implicitly mark 103 * callees of cold functions as cold themselves, so it's not sufficient to add 104 * __function_aligned here as that will not ensure that callees are correctly 105 * aligned. 106 * 107 * See: 108 * 109 * https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N 110 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9 111 */ 112 #if defined(CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT) || (CONFIG_FUNCTION_ALIGNMENT == 0) 113 #define __cold __attribute__((__cold__)) 114 #else 115 #define __cold 116 #endif 117 118 /* 119 * On x86-64 and arm64 targets, __preserve_most changes the calling convention 120 * of a function to make the code in the caller as unintrusive as possible. This 121 * convention behaves identically to the C calling convention on how arguments 122 * and return values are passed, but uses a different set of caller- and callee- 123 * saved registers. 124 * 125 * The purpose is to alleviates the burden of saving and recovering a large 126 * register set before and after the call in the caller. This is beneficial for 127 * rarely taken slow paths, such as error-reporting functions that may be called 128 * from hot paths. 129 * 130 * Note: This may conflict with instrumentation inserted on function entry which 131 * does not use __preserve_most or equivalent convention (if in assembly). Since 132 * function tracing assumes the normal C calling convention, where the attribute 133 * is supported, __preserve_most implies notrace. It is recommended to restrict 134 * use of the attribute to functions that should or already disable tracing. 135 * 136 * Optional: not supported by gcc. 137 * 138 * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most 139 */ 140 #if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64)) 141 # define __preserve_most notrace __attribute__((__preserve_most__)) 142 #else 143 # define __preserve_most 144 #endif 145 146 /* Compiler specific macros. */ 147 #ifdef __clang__ 148 #include <linux/compiler-clang.h> 149 #elif defined(__GNUC__) 150 /* The above compilers also define __GNUC__, so order is important here. */ 151 #include <linux/compiler-gcc.h> 152 #else 153 #error "Unknown compiler" 154 #endif 155 156 /* 157 * Some architectures need to provide custom definitions of macros provided 158 * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that 159 * conditionally rather than using an asm-generic wrapper in order to avoid 160 * build failures if any C compilation, which will include this file via an 161 * -include argument in c_flags, occurs prior to the asm-generic wrappers being 162 * generated. 163 */ 164 #ifdef CONFIG_HAVE_ARCH_COMPILER_H 165 #include <asm/compiler.h> 166 #endif 167 168 struct ftrace_branch_data { 169 const char *func; 170 const char *file; 171 unsigned line; 172 union { 173 struct { 174 unsigned long correct; 175 unsigned long incorrect; 176 }; 177 struct { 178 unsigned long miss; 179 unsigned long hit; 180 }; 181 unsigned long miss_hit[2]; 182 }; 183 }; 184 185 struct ftrace_likely_data { 186 struct ftrace_branch_data data; 187 unsigned long constant; 188 }; 189 190 #if defined(CC_USING_HOTPATCH) 191 #define notrace __attribute__((hotpatch(0, 0))) 192 #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY) 193 #define notrace __attribute__((patchable_function_entry(0, 0))) 194 #else 195 #define notrace __attribute__((__no_instrument_function__)) 196 #endif 197 198 /* 199 * it doesn't make sense on ARM (currently the only user of __naked) 200 * to trace naked functions because then mcount is called without 201 * stack and frame pointer being set up and there is no chance to 202 * restore the lr register to the value before mcount was called. 203 */ 204 #define __naked __attribute__((__naked__)) notrace 205 206 /* 207 * Prefer gnu_inline, so that extern inline functions do not emit an 208 * externally visible function. This makes extern inline behave as per gnu89 209 * semantics rather than c99. This prevents multiple symbol definition errors 210 * of extern inline functions at link time. 211 * A lot of inline functions can cause havoc with function tracing. 212 */ 213 #define inline inline __gnu_inline __inline_maybe_unused notrace 214 215 /* 216 * gcc provides both __inline__ and __inline as alternate spellings of 217 * the inline keyword, though the latter is undocumented. New kernel 218 * code should only use the inline spelling, but some existing code 219 * uses __inline__. Since we #define inline above, to ensure 220 * __inline__ has the same semantics, we need this #define. 221 * 222 * However, the spelling __inline is strictly reserved for referring 223 * to the bare keyword. 224 */ 225 #define __inline__ inline 226 227 /* 228 * GCC does not warn about unused static inline functions for -Wunused-function. 229 * Suppress the warning in clang as well by using __maybe_unused, but enable it 230 * for W=1 build. This will allow clang to find unused functions. Remove the 231 * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. 232 */ 233 #ifdef KBUILD_EXTRA_WARN1 234 #define __inline_maybe_unused 235 #else 236 #define __inline_maybe_unused __maybe_unused 237 #endif 238 239 /* 240 * Rather then using noinline to prevent stack consumption, use 241 * noinline_for_stack instead. For documentation reasons. 242 */ 243 #define noinline_for_stack noinline 244 245 /* 246 * Sanitizer helper attributes: Because using __always_inline and 247 * __no_sanitize_* conflict, provide helper attributes that will either expand 248 * to __no_sanitize_* in compilation units where instrumentation is enabled 249 * (__SANITIZE_*__), or __always_inline in compilation units without 250 * instrumentation (__SANITIZE_*__ undefined). 251 */ 252 #ifdef __SANITIZE_ADDRESS__ 253 /* 254 * We can't declare function 'inline' because __no_sanitize_address conflicts 255 * with inlining. Attempt to inline it may cause a build failure. 256 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 257 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 258 */ 259 # define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused 260 # define __no_sanitize_or_inline __no_kasan_or_inline 261 #else 262 # define __no_kasan_or_inline __always_inline 263 #endif 264 265 #ifdef __SANITIZE_THREAD__ 266 /* 267 * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin 268 * atomics even with __no_sanitize_thread (to avoid false positives in userspace 269 * ThreadSanitizer). The kernel's requirements are stricter and we really do not 270 * want any instrumentation with __no_kcsan. 271 * 272 * Therefore we add __disable_sanitizer_instrumentation where available to 273 * disable all instrumentation. See Kconfig.kcsan where this is mandatory. 274 */ 275 # define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation 276 # define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused 277 #else 278 # define __no_kcsan 279 #endif 280 281 #ifdef __SANITIZE_MEMORY__ 282 /* 283 * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined 284 * functions, therefore disabling KMSAN checks also requires disabling inlining. 285 * 286 * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors 287 * within the function and marks all its outputs as initialized. 288 */ 289 # define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused 290 #endif 291 292 #ifndef __no_sanitize_or_inline 293 #define __no_sanitize_or_inline __always_inline 294 #endif 295 296 /* Do not trap wrapping arithmetic within an annotated function. */ 297 #ifdef CONFIG_UBSAN_SIGNED_WRAP 298 # define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow"))) 299 #else 300 # define __signed_wrap 301 #endif 302 303 /* Section for code which can't be instrumented at all */ 304 #define __noinstr_section(section) \ 305 noinline notrace __attribute((__section__(section))) \ 306 __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \ 307 __no_sanitize_memory __signed_wrap 308 309 #define noinstr __noinstr_section(".noinstr.text") 310 311 /* 312 * The __cpuidle section is used twofold: 313 * 314 * 1) the original use -- identifying if a CPU is 'stuck' in idle state based 315 * on it's instruction pointer. See cpu_in_idle(). 316 * 317 * 2) supressing instrumentation around where cpuidle disables RCU; where the 318 * function isn't strictly required for #1, this is interchangeable with 319 * noinstr. 320 */ 321 #define __cpuidle __noinstr_section(".cpuidle.text") 322 323 #endif /* __KERNEL__ */ 324 325 #endif /* __ASSEMBLY__ */ 326 327 /* 328 * The below symbols may be defined for one or more, but not ALL, of the above 329 * compilers. We don't consider that to be an error, so set them to nothing. 330 * For example, some of them are for compiler specific plugins. 331 */ 332 #ifndef __latent_entropy 333 # define __latent_entropy 334 #endif 335 336 #if defined(RANDSTRUCT) && !defined(__CHECKER__) 337 # define __randomize_layout __designated_init __attribute__((randomize_layout)) 338 # define __no_randomize_layout __attribute__((no_randomize_layout)) 339 /* This anon struct can add padding, so only enable it under randstruct. */ 340 # define randomized_struct_fields_start struct { 341 # define randomized_struct_fields_end } __randomize_layout; 342 #else 343 # define __randomize_layout __designated_init 344 # define __no_randomize_layout 345 # define randomized_struct_fields_start 346 # define randomized_struct_fields_end 347 #endif 348 349 #ifndef __noscs 350 # define __noscs 351 #endif 352 353 #ifndef __nocfi 354 # define __nocfi 355 #endif 356 357 /* 358 * Any place that could be marked with the "alloc_size" attribute is also 359 * a place to be marked with the "malloc" attribute, except those that may 360 * be performing a _reallocation_, as that may alias the existing pointer. 361 * For these, use __realloc_size(). 362 */ 363 #ifdef __alloc_size__ 364 # define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc 365 # define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) 366 #else 367 # define __alloc_size(x, ...) __malloc 368 # define __realloc_size(x, ...) 369 #endif 370 371 /* 372 * When the size of an allocated object is needed, use the best available 373 * mechanism to find it. (For cases where sizeof() cannot be used.) 374 */ 375 #if __has_builtin(__builtin_dynamic_object_size) 376 #define __struct_size(p) __builtin_dynamic_object_size(p, 0) 377 #define __member_size(p) __builtin_dynamic_object_size(p, 1) 378 #else 379 #define __struct_size(p) __builtin_object_size(p, 0) 380 #define __member_size(p) __builtin_object_size(p, 1) 381 #endif 382 383 /* 384 * Some versions of gcc do not mark 'asm goto' volatile: 385 * 386 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979 387 * 388 * We do it here by hand, because it doesn't hurt. 389 */ 390 #ifndef asm_goto_output 391 #define asm_goto_output(x...) asm volatile goto(x) 392 #endif 393 394 #ifdef CONFIG_CC_HAS_ASM_INLINE 395 #define asm_inline asm __inline 396 #else 397 #define asm_inline asm 398 #endif 399 400 /* Are two types/vars the same type (ignoring qualifiers)? */ 401 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) 402 403 /* 404 * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving 405 * non-scalar types unchanged. 406 */ 407 /* 408 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' 409 * is not type-compatible with 'signed char', and we define a separate case. 410 */ 411 #define __scalar_type_to_expr_cases(type) \ 412 unsigned type: (unsigned type)0, \ 413 signed type: (signed type)0 414 415 #define __unqual_scalar_typeof(x) typeof( \ 416 _Generic((x), \ 417 char: (char)0, \ 418 __scalar_type_to_expr_cases(char), \ 419 __scalar_type_to_expr_cases(short), \ 420 __scalar_type_to_expr_cases(int), \ 421 __scalar_type_to_expr_cases(long), \ 422 __scalar_type_to_expr_cases(long long), \ 423 default: (x))) 424 425 /* Is this type a native word size -- useful for atomic operations */ 426 #define __native_word(t) \ 427 (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ 428 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) 429 430 #ifdef __OPTIMIZE__ 431 # define __compiletime_assert(condition, msg, prefix, suffix) \ 432 do { \ 433 /* \ 434 * __noreturn is needed to give the compiler enough \ 435 * information to avoid certain possibly-uninitialized \ 436 * warnings (regardless of the build failing). \ 437 */ \ 438 __noreturn extern void prefix ## suffix(void) \ 439 __compiletime_error(msg); \ 440 if (!(condition)) \ 441 prefix ## suffix(); \ 442 } while (0) 443 #else 444 # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) 445 #endif 446 447 #define _compiletime_assert(condition, msg, prefix, suffix) \ 448 __compiletime_assert(condition, msg, prefix, suffix) 449 450 /** 451 * compiletime_assert - break build and emit msg if condition is false 452 * @condition: a compile-time constant condition to check 453 * @msg: a message to emit if condition is false 454 * 455 * In tradition of POSIX assert, this macro will break the build if the 456 * supplied condition is *false*, emitting the supplied error message if the 457 * compiler has support to do so. 458 */ 459 #define compiletime_assert(condition, msg) \ 460 _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) 461 462 #define compiletime_assert_atomic_type(t) \ 463 compiletime_assert(__native_word(t), \ 464 "Need native word sized stores/loads for atomicity.") 465 466 /* Helpers for emitting diagnostics in pragmas. */ 467 #ifndef __diag 468 #define __diag(string) 469 #endif 470 471 #ifndef __diag_GCC 472 #define __diag_GCC(version, severity, string) 473 #endif 474 475 #define __diag_push() __diag(push) 476 #define __diag_pop() __diag(pop) 477 478 #define __diag_ignore(compiler, version, option, comment) \ 479 __diag_ ## compiler(version, ignore, option) 480 #define __diag_warn(compiler, version, option, comment) \ 481 __diag_ ## compiler(version, warn, option) 482 #define __diag_error(compiler, version, option, comment) \ 483 __diag_ ## compiler(version, error, option) 484 485 #ifndef __diag_ignore_all 486 #define __diag_ignore_all(option, comment) 487 #endif 488 489 #endif /* __LINUX_COMPILER_TYPES_H */ 490