#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif

#define __user

#ifndef __attribute_const__
# define __attribute_const__
#endif

#ifndef __maybe_unused
# define __maybe_unused __attribute__((unused))
#endif

#ifndef __packed
# define __packed __attribute__((__packed__))
#endif

#ifndef __force
# define __force
#endif

#ifndef __weak
# define __weak __attribute__((weak))
#endif

#ifndef likely
# define likely(x) __builtin_expect(!!(x), 1)
#endif

#ifndef unlikely
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif

/* Legacy one-time accessor; unlike READ_ONCE()/WRITE_ONCE() it only handles scalar types. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#include <linux/types.h>

/*
 * Size-dispatched helper for READ_ONCE(): scalar sizes use a single volatile
 * load; anything else falls back to a barrier-protected memcpy.
 */
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

/*
 * Size-dispatched helper for WRITE_ONCE(): scalar sizes use a single volatile
 * store; anything else falls back to a barrier-protected memcpy.
 */
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see above), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#endif /* _TOOLS_LINUX_COMPILER_H_ */
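
/*
 * Minimal usage sketch for the macros above, kept compiled out with "#if 0".
 * It illustrates use case (1) from the comment block: a producer and a
 * consumer running on the same CPU (e.g. process-level code vs. an interrupt
 * handler), where compiler barriers alone are sufficient for ordering.  The
 * producer(), consumer(), shared_data and data_ready names are hypothetical,
 * introduced only for this example.
 */
#if 0
static int shared_data;
static int data_ready;

/* Writer: publish the data first, then set the flag with a once-write. */
static void producer(int value)
{
	shared_data = value;
	barrier();			/* keep the data store before the flag store */
	WRITE_ONCE(data_ready, 1);	/* single store the compiler cannot merge or tear */
}

/* Reader: poll the flag with a once-read so the load is not hoisted out of the loop. */
static int consumer(void)
{
	while (unlikely(!READ_ONCE(data_ready)))
		;			/* each iteration re-reads data_ready from memory */
	barrier();			/* keep the flag load before the data load */
	return shared_data;
}
#endif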