// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif

#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

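/*
 * Illustrative sketch, not part of the generated interface: the
 * try_cmpxchg() forms fold the usual compare-and-swap retry loop into a
 * boolean test and, on failure, write the value actually observed back
 * through @_oldp, so callers never re-read the location by hand. For a
 * hypothetical caller-owned 'unsigned long *ptr', setting a bit looks
 * like:
 *
 *	unsigned long old = READ_ONCE(*ptr), new;
 *
 *	do {
 *		new = old | 1UL;
 *	} while (!raw_try_cmpxchg(ptr, &old, new));
 */
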
#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

#ifdef arch_sync_try_cmpxchg
#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
#else
#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

/**
 * raw_atomic_read() - atomic load with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

/**
 * raw_atomic_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
	return arch_atomic_read_acquire(v);
#else
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

/**
 * raw_atomic_set() - atomic set with relaxed ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

/**
 * raw_atomic_set_release() - atomic set with release ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
	arch_atomic_set_release(v, i);
#else
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
#endif
}

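/*
 * Illustrative sketch, not part of the generated interface: a release
 * store pairs with an acquire load to hand off plain data, e.g. with a
 * hypothetical 'ready' flag (atomic_t) guarding a hypothetical 'data'
 * word:
 *
 *	// producer
 *	data = 42;
 *	raw_atomic_set_release(&ready, 1);
 *
 *	// consumer
 *	while (!raw_atomic_read_acquire(&ready))
 *		cpu_relax();
 *	// the store to 'data' is guaranteed to be visible here
 */
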
/**
 * raw_atomic_add() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

/**
 * raw_atomic_add_return() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}

/**
 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_acquire)
	return arch_atomic_add_return_acquire(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_acquire"
#endif
}

/**
 * raw_atomic_add_return_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_release)
	return arch_atomic_add_return_release(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_release"
#endif
}

/**
 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_relaxed)
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_add() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_add"
#endif
}

/**
 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
	return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

/**
 * raw_atomic_fetch_add_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
	return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

/**
 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}

/**
 * raw_atomic_sub() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

/**
 * raw_atomic_sub_return() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_sub_return"
#endif
}

/**
 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_acquire)
	return arch_atomic_sub_return_acquire(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_acquire"
#endif
}

/**
 * raw_atomic_sub_return_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_release)
	return arch_atomic_sub_return_release(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_release"
#endif
}

/**
 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_relaxed)
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_sub() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_sub"
#endif
}

/**
 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_acquire)
	return arch_atomic_fetch_sub_acquire(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_release)
	return arch_atomic_fetch_sub_release(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_release"
#endif
}

/**
 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_relaxed)
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
}

/**
 * raw_atomic_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
#if defined(arch_atomic_inc)
	arch_atomic_inc(v);
#else
	raw_atomic_add(1, v);
#endif
}

998ad811070SMark Rutland /**
999ad811070SMark Rutland  * raw_atomic_inc_return() - atomic increment with full ordering
1000ad811070SMark Rutland  * @v: pointer to atomic_t
1001ad811070SMark Rutland  *
1002ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with full ordering.
1003ad811070SMark Rutland  *
1004ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
1005ad811070SMark Rutland  *
1006ad811070SMark Rutland  * Return: The updated value of @v.
1007ad811070SMark Rutland  */
1008e3d18ceeSMark Rutland static __always_inline int
raw_atomic_inc_return(atomic_t * v)10099257959aSMark Rutland raw_atomic_inc_return(atomic_t *v)
1010e3d18ceeSMark Rutland {
10111d78814dSMark Rutland #if defined(arch_atomic_inc_return)
10121d78814dSMark Rutland 	return arch_atomic_inc_return(v);
10131d78814dSMark Rutland #elif defined(arch_atomic_inc_return_relaxed)
1014e3d18ceeSMark Rutland 	int ret;
1015e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1016e3d18ceeSMark Rutland 	ret = arch_atomic_inc_return_relaxed(v);
1017e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1018e3d18ceeSMark Rutland 	return ret;
10199257959aSMark Rutland #else
10209257959aSMark Rutland 	return raw_atomic_add_return(1, v);
1021e3d18ceeSMark Rutland #endif
10221d78814dSMark Rutland }
1023e3d18ceeSMark Rutland 
1024ad811070SMark Rutland /**
1025ad811070SMark Rutland  * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
1026ad811070SMark Rutland  * @v: pointer to atomic_t
1027ad811070SMark Rutland  *
1028ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with acquire ordering.
1029ad811070SMark Rutland  *
1030ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
1031ad811070SMark Rutland  *
1032ad811070SMark Rutland  * Return: The updated value of @v.
1033ad811070SMark Rutland  */
1034e3d18ceeSMark Rutland static __always_inline int
raw_atomic_inc_return_acquire(atomic_t * v)10359257959aSMark Rutland raw_atomic_inc_return_acquire(atomic_t *v)
1036e3d18ceeSMark Rutland {
10371d78814dSMark Rutland #if defined(arch_atomic_inc_return_acquire)
10381d78814dSMark Rutland 	return arch_atomic_inc_return_acquire(v);
10391d78814dSMark Rutland #elif defined(arch_atomic_inc_return_relaxed)
10409257959aSMark Rutland 	int ret = arch_atomic_inc_return_relaxed(v);
1041e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1042e3d18ceeSMark Rutland 	return ret;
10439257959aSMark Rutland #elif defined(arch_atomic_inc_return)
10441d78814dSMark Rutland 	return arch_atomic_inc_return(v);
10459257959aSMark Rutland #else
10469257959aSMark Rutland 	return raw_atomic_add_return_acquire(1, v);
1047e3d18ceeSMark Rutland #endif
10481d78814dSMark Rutland }
1049e3d18ceeSMark Rutland 
1050ad811070SMark Rutland /**
1051ad811070SMark Rutland  * raw_atomic_inc_return_release() - atomic increment with release ordering
1052ad811070SMark Rutland  * @v: pointer to atomic_t
1053ad811070SMark Rutland  *
1054ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with release ordering.
1055ad811070SMark Rutland  *
1056ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
1057ad811070SMark Rutland  *
1058ad811070SMark Rutland  * Return: The updated value of @v.
1059ad811070SMark Rutland  */
1060e3d18ceeSMark Rutland static __always_inline int
10619257959aSMark Rutland raw_atomic_inc_return_release(atomic_t *v)
1062e3d18ceeSMark Rutland {
10631d78814dSMark Rutland #if defined(arch_atomic_inc_return_release)
10641d78814dSMark Rutland 	return arch_atomic_inc_return_release(v);
10651d78814dSMark Rutland #elif defined(arch_atomic_inc_return_relaxed)
1066e3d18ceeSMark Rutland 	__atomic_release_fence();
10679257959aSMark Rutland 	return arch_atomic_inc_return_relaxed(v);
10689257959aSMark Rutland #elif defined(arch_atomic_inc_return)
10691d78814dSMark Rutland 	return arch_atomic_inc_return(v);
10709257959aSMark Rutland #else
10719257959aSMark Rutland 	return raw_atomic_add_return_release(1, v);
1072e3d18ceeSMark Rutland #endif
10731d78814dSMark Rutland }
1074e3d18ceeSMark Rutland 
1075ad811070SMark Rutland /**
1076ad811070SMark Rutland  * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
1077ad811070SMark Rutland  * @v: pointer to atomic_t
1078ad811070SMark Rutland  *
1079ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with relaxed ordering.
1080ad811070SMark Rutland  *
1081ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
1082ad811070SMark Rutland  *
1083ad811070SMark Rutland  * Return: The updated value of @v.
1084ad811070SMark Rutland  */
1085e3d18ceeSMark Rutland static __always_inline int
10869257959aSMark Rutland raw_atomic_inc_return_relaxed(atomic_t *v)
10879257959aSMark Rutland {
10881d78814dSMark Rutland #if defined(arch_atomic_inc_return_relaxed)
10891d78814dSMark Rutland 	return arch_atomic_inc_return_relaxed(v);
10901d78814dSMark Rutland #elif defined(arch_atomic_inc_return)
10911d78814dSMark Rutland 	return arch_atomic_inc_return(v);
10921d78814dSMark Rutland #else
10939257959aSMark Rutland 	return raw_atomic_add_return_relaxed(1, v);
10949257959aSMark Rutland #endif
10951d78814dSMark Rutland }
10969257959aSMark Rutland 
1097ad811070SMark Rutland /**
1098ad811070SMark Rutland  * raw_atomic_fetch_inc() - atomic increment with full ordering
1099ad811070SMark Rutland  * @v: pointer to atomic_t
1100ad811070SMark Rutland  *
1101ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with full ordering.
1102ad811070SMark Rutland  *
1103ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
1104ad811070SMark Rutland  *
1105ad811070SMark Rutland  * Return: The original value of @v.
1106ad811070SMark Rutland  */
11079257959aSMark Rutland static __always_inline int
11089257959aSMark Rutland raw_atomic_fetch_inc(atomic_t *v)
1109e3d18ceeSMark Rutland {
11101d78814dSMark Rutland #if defined(arch_atomic_fetch_inc)
11111d78814dSMark Rutland 	return arch_atomic_fetch_inc(v);
11121d78814dSMark Rutland #elif defined(arch_atomic_fetch_inc_relaxed)
1113e3d18ceeSMark Rutland 	int ret;
1114e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1115e3d18ceeSMark Rutland 	ret = arch_atomic_fetch_inc_relaxed(v);
1116e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1117e3d18ceeSMark Rutland 	return ret;
11189257959aSMark Rutland #else
11199257959aSMark Rutland 	return raw_atomic_fetch_add(1, v);
1120e3d18ceeSMark Rutland #endif
11211d78814dSMark Rutland }
1122e3d18ceeSMark Rutland 
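/*
 * Illustrative sketch, not part of the generated API: raw_atomic_fetch_inc()
 * returns the pre-increment value, which makes it a natural fit for handing
 * out monotonically increasing IDs. The helper name is hypothetical.
 */
static __always_inline int
raw_atomic_example_alloc_id(atomic_t *next_id)
{
	/* Each caller observes a distinct original value of @next_id. */
	return raw_atomic_fetch_inc(next_id);
}
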
1123ad811070SMark Rutland /**
1124ad811070SMark Rutland  * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
1125ad811070SMark Rutland  * @v: pointer to atomic_t
1126ad811070SMark Rutland  *
1127ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with acquire ordering.
1128ad811070SMark Rutland  *
1129ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
1130ad811070SMark Rutland  *
1131ad811070SMark Rutland  * Return: The original value of @v.
1132ad811070SMark Rutland  */
1133e3d18ceeSMark Rutland static __always_inline int
11349257959aSMark Rutland raw_atomic_fetch_inc_acquire(atomic_t *v)
1135e3d18ceeSMark Rutland {
11361d78814dSMark Rutland #if defined(arch_atomic_fetch_inc_acquire)
11371d78814dSMark Rutland 	return arch_atomic_fetch_inc_acquire(v);
11381d78814dSMark Rutland #elif defined(arch_atomic_fetch_inc_relaxed)
11399257959aSMark Rutland 	int ret = arch_atomic_fetch_inc_relaxed(v);
1140e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1141e3d18ceeSMark Rutland 	return ret;
11429257959aSMark Rutland #elif defined(arch_atomic_fetch_inc)
11431d78814dSMark Rutland 	return arch_atomic_fetch_inc(v);
11449257959aSMark Rutland #else
11459257959aSMark Rutland 	return raw_atomic_fetch_add_acquire(1, v);
1146e3d18ceeSMark Rutland #endif
11471d78814dSMark Rutland }
1148e3d18ceeSMark Rutland 
1149ad811070SMark Rutland /**
1150ad811070SMark Rutland  * raw_atomic_fetch_inc_release() - atomic increment with release ordering
1151ad811070SMark Rutland  * @v: pointer to atomic_t
1152ad811070SMark Rutland  *
1153ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with release ordering.
1154ad811070SMark Rutland  *
1155ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
1156ad811070SMark Rutland  *
1157ad811070SMark Rutland  * Return: The original value of @v.
1158ad811070SMark Rutland  */
1159e3d18ceeSMark Rutland static __always_inline int
11609257959aSMark Rutland raw_atomic_fetch_inc_release(atomic_t *v)
1161e3d18ceeSMark Rutland {
11621d78814dSMark Rutland #if defined(arch_atomic_fetch_inc_release)
11631d78814dSMark Rutland 	return arch_atomic_fetch_inc_release(v);
11641d78814dSMark Rutland #elif defined(arch_atomic_fetch_inc_relaxed)
1165e3d18ceeSMark Rutland 	__atomic_release_fence();
11669257959aSMark Rutland 	return arch_atomic_fetch_inc_relaxed(v);
11679257959aSMark Rutland #elif defined(arch_atomic_fetch_inc)
11681d78814dSMark Rutland 	return arch_atomic_fetch_inc(v);
11699257959aSMark Rutland #else
11709257959aSMark Rutland 	return raw_atomic_fetch_add_release(1, v);
1171e3d18ceeSMark Rutland #endif
11721d78814dSMark Rutland }
1173e3d18ceeSMark Rutland 
1174ad811070SMark Rutland /**
1175ad811070SMark Rutland  * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
1176ad811070SMark Rutland  * @v: pointer to atomic_t
1177ad811070SMark Rutland  *
1178ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with relaxed ordering.
1179ad811070SMark Rutland  *
1180ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
1181ad811070SMark Rutland  *
1182ad811070SMark Rutland  * Return: The original value of @v.
1183ad811070SMark Rutland  */
1184e3d18ceeSMark Rutland static __always_inline int
11859257959aSMark Rutland raw_atomic_fetch_inc_relaxed(atomic_t *v)
11869257959aSMark Rutland {
11871d78814dSMark Rutland #if defined(arch_atomic_fetch_inc_relaxed)
11881d78814dSMark Rutland 	return arch_atomic_fetch_inc_relaxed(v);
11891d78814dSMark Rutland #elif defined(arch_atomic_fetch_inc)
11901d78814dSMark Rutland 	return arch_atomic_fetch_inc(v);
11919257959aSMark Rutland #else
11921d78814dSMark Rutland 	return raw_atomic_fetch_add_relaxed(1, v);
11931d78814dSMark Rutland #endif
11941d78814dSMark Rutland }
11951d78814dSMark Rutland 
1196ad811070SMark Rutland /**
1197ad811070SMark Rutland  * raw_atomic_dec() - atomic decrement with relaxed ordering
1198ad811070SMark Rutland  * @v: pointer to atomic_t
1199ad811070SMark Rutland  *
1200ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with relaxed ordering.
1201ad811070SMark Rutland  *
1202ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
1203ad811070SMark Rutland  *
1204ad811070SMark Rutland  * Return: Nothing.
1205ad811070SMark Rutland  */
12069257959aSMark Rutland static __always_inline void
12079257959aSMark Rutland raw_atomic_dec(atomic_t *v)
12089257959aSMark Rutland {
12091d78814dSMark Rutland #if defined(arch_atomic_dec)
12101d78814dSMark Rutland 	arch_atomic_dec(v);
12111d78814dSMark Rutland #else
12129257959aSMark Rutland 	raw_atomic_sub(1, v);
12139257959aSMark Rutland #endif
12141d78814dSMark Rutland }
12159257959aSMark Rutland 
1216ad811070SMark Rutland /**
1217ad811070SMark Rutland  * raw_atomic_dec_return() - atomic decrement with full ordering
1218ad811070SMark Rutland  * @v: pointer to atomic_t
1219ad811070SMark Rutland  *
1220ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with full ordering.
1221ad811070SMark Rutland  *
1222ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
1223ad811070SMark Rutland  *
1224ad811070SMark Rutland  * Return: The updated value of @v.
1225ad811070SMark Rutland  */
12269257959aSMark Rutland static __always_inline int
12279257959aSMark Rutland raw_atomic_dec_return(atomic_t *v)
1228e3d18ceeSMark Rutland {
12291d78814dSMark Rutland #if defined(arch_atomic_dec_return)
12301d78814dSMark Rutland 	return arch_atomic_dec_return(v);
12311d78814dSMark Rutland #elif defined(arch_atomic_dec_return_relaxed)
1232e3d18ceeSMark Rutland 	int ret;
1233e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1234e3d18ceeSMark Rutland 	ret = arch_atomic_dec_return_relaxed(v);
1235e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1236e3d18ceeSMark Rutland 	return ret;
12379257959aSMark Rutland #else
12389257959aSMark Rutland 	return raw_atomic_sub_return(1, v);
1239e3d18ceeSMark Rutland #endif
12401d78814dSMark Rutland }
1241e3d18ceeSMark Rutland 
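/*
 * Illustrative sketch, not part of the generated API: a reference-count
 * style put built on raw_atomic_dec_return(). The full ordering of the
 * final decrement ensures prior accesses to the object are complete before
 * the caller is told to free it. The helper name is hypothetical.
 */
static __always_inline bool
raw_atomic_example_put(atomic_t *refcount)
{
	/* Returns true when the caller dropped the last reference. */
	return raw_atomic_dec_return(refcount) == 0;
}
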
1242ad811070SMark Rutland /**
1243ad811070SMark Rutland  * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
1244ad811070SMark Rutland  * @v: pointer to atomic_t
1245ad811070SMark Rutland  *
1246ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with acquire ordering.
1247ad811070SMark Rutland  *
1248ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
1249ad811070SMark Rutland  *
1250ad811070SMark Rutland  * Return: The updated value of @v.
1251ad811070SMark Rutland  */
1252e3d18ceeSMark Rutland static __always_inline int
12539257959aSMark Rutland raw_atomic_dec_return_acquire(atomic_t *v)
1254e3d18ceeSMark Rutland {
12551d78814dSMark Rutland #if defined(arch_atomic_dec_return_acquire)
12561d78814dSMark Rutland 	return arch_atomic_dec_return_acquire(v);
12571d78814dSMark Rutland #elif defined(arch_atomic_dec_return_relaxed)
12589257959aSMark Rutland 	int ret = arch_atomic_dec_return_relaxed(v);
1259e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1260e3d18ceeSMark Rutland 	return ret;
12619257959aSMark Rutland #elif defined(arch_atomic_dec_return)
12621d78814dSMark Rutland 	return arch_atomic_dec_return(v);
12639257959aSMark Rutland #else
12649257959aSMark Rutland 	return raw_atomic_sub_return_acquire(1, v);
1265e3d18ceeSMark Rutland #endif
12661d78814dSMark Rutland }
1267e3d18ceeSMark Rutland 
1268ad811070SMark Rutland /**
1269ad811070SMark Rutland  * raw_atomic_dec_return_release() - atomic decrement with release ordering
1270ad811070SMark Rutland  * @v: pointer to atomic_t
1271ad811070SMark Rutland  *
1272ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with release ordering.
1273ad811070SMark Rutland  *
1274ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
1275ad811070SMark Rutland  *
1276ad811070SMark Rutland  * Return: The updated value of @v.
1277ad811070SMark Rutland  */
1278e3d18ceeSMark Rutland static __always_inline int
12799257959aSMark Rutland raw_atomic_dec_return_release(atomic_t *v)
1280e3d18ceeSMark Rutland {
12811d78814dSMark Rutland #if defined(arch_atomic_dec_return_release)
12821d78814dSMark Rutland 	return arch_atomic_dec_return_release(v);
12831d78814dSMark Rutland #elif defined(arch_atomic_dec_return_relaxed)
1284e3d18ceeSMark Rutland 	__atomic_release_fence();
12859257959aSMark Rutland 	return arch_atomic_dec_return_relaxed(v);
12869257959aSMark Rutland #elif defined(arch_atomic_dec_return)
12871d78814dSMark Rutland 	return arch_atomic_dec_return(v);
12889257959aSMark Rutland #else
12899257959aSMark Rutland 	return raw_atomic_sub_return_release(1, v);
1290e3d18ceeSMark Rutland #endif
12911d78814dSMark Rutland }
1292e3d18ceeSMark Rutland 
1293ad811070SMark Rutland /**
1294ad811070SMark Rutland  * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
1295ad811070SMark Rutland  * @v: pointer to atomic_t
1296ad811070SMark Rutland  *
1297ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with relaxed ordering.
1298ad811070SMark Rutland  *
1299ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
1300ad811070SMark Rutland  *
1301ad811070SMark Rutland  * Return: The updated value of @v.
1302ad811070SMark Rutland  */
1303e3d18ceeSMark Rutland static __always_inline int
13049257959aSMark Rutland raw_atomic_dec_return_relaxed(atomic_t *v)
13059257959aSMark Rutland {
13061d78814dSMark Rutland #if defined(arch_atomic_dec_return_relaxed)
13071d78814dSMark Rutland 	return arch_atomic_dec_return_relaxed(v);
13081d78814dSMark Rutland #elif defined(arch_atomic_dec_return)
13091d78814dSMark Rutland 	return arch_atomic_dec_return(v);
13101d78814dSMark Rutland #else
13119257959aSMark Rutland 	return raw_atomic_sub_return_relaxed(1, v);
13129257959aSMark Rutland #endif
13131d78814dSMark Rutland }
13149257959aSMark Rutland 
1315ad811070SMark Rutland /**
1316ad811070SMark Rutland  * raw_atomic_fetch_dec() - atomic decrement with full ordering
1317ad811070SMark Rutland  * @v: pointer to atomic_t
1318ad811070SMark Rutland  *
1319ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with full ordering.
1320ad811070SMark Rutland  *
1321ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
1322ad811070SMark Rutland  *
1323ad811070SMark Rutland  * Return: The original value of @v.
1324ad811070SMark Rutland  */
13259257959aSMark Rutland static __always_inline int
13269257959aSMark Rutland raw_atomic_fetch_dec(atomic_t *v)
1327e3d18ceeSMark Rutland {
13281d78814dSMark Rutland #if defined(arch_atomic_fetch_dec)
13291d78814dSMark Rutland 	return arch_atomic_fetch_dec(v);
13301d78814dSMark Rutland #elif defined(arch_atomic_fetch_dec_relaxed)
1331e3d18ceeSMark Rutland 	int ret;
1332e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1333e3d18ceeSMark Rutland 	ret = arch_atomic_fetch_dec_relaxed(v);
1334e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1335e3d18ceeSMark Rutland 	return ret;
13369257959aSMark Rutland #else
13379257959aSMark Rutland 	return raw_atomic_fetch_sub(1, v);
1338e3d18ceeSMark Rutland #endif
13391d78814dSMark Rutland }
1340e3d18ceeSMark Rutland 
1341ad811070SMark Rutland /**
1342ad811070SMark Rutland  * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
1343ad811070SMark Rutland  * @v: pointer to atomic_t
1344ad811070SMark Rutland  *
1345ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with acquire ordering.
1346ad811070SMark Rutland  *
1347ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
1348ad811070SMark Rutland  *
1349ad811070SMark Rutland  * Return: The original value of @v.
1350ad811070SMark Rutland  */
1351e3d18ceeSMark Rutland static __always_inline int
13529257959aSMark Rutland raw_atomic_fetch_dec_acquire(atomic_t *v)
1353e3d18ceeSMark Rutland {
13541d78814dSMark Rutland #if defined(arch_atomic_fetch_dec_acquire)
13551d78814dSMark Rutland 	return arch_atomic_fetch_dec_acquire(v);
13561d78814dSMark Rutland #elif defined(arch_atomic_fetch_dec_relaxed)
13579257959aSMark Rutland 	int ret = arch_atomic_fetch_dec_relaxed(v);
1358e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1359e3d18ceeSMark Rutland 	return ret;
13609257959aSMark Rutland #elif defined(arch_atomic_fetch_dec)
13611d78814dSMark Rutland 	return arch_atomic_fetch_dec(v);
13629257959aSMark Rutland #else
13639257959aSMark Rutland 	return raw_atomic_fetch_sub_acquire(1, v);
1364e3d18ceeSMark Rutland #endif
13651d78814dSMark Rutland }
1366e3d18ceeSMark Rutland 
1367ad811070SMark Rutland /**
1368ad811070SMark Rutland  * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
1369ad811070SMark Rutland  * @v: pointer to atomic_t
1370ad811070SMark Rutland  *
1371ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with release ordering.
1372ad811070SMark Rutland  *
1373ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
1374ad811070SMark Rutland  *
1375ad811070SMark Rutland  * Return: The original value of @v.
1376ad811070SMark Rutland  */
1377e3d18ceeSMark Rutland static __always_inline int
13789257959aSMark Rutland raw_atomic_fetch_dec_release(atomic_t *v)
1379e3d18ceeSMark Rutland {
13801d78814dSMark Rutland #if defined(arch_atomic_fetch_dec_release)
13811d78814dSMark Rutland 	return arch_atomic_fetch_dec_release(v);
13821d78814dSMark Rutland #elif defined(arch_atomic_fetch_dec_relaxed)
1383e3d18ceeSMark Rutland 	__atomic_release_fence();
13849257959aSMark Rutland 	return arch_atomic_fetch_dec_relaxed(v);
13859257959aSMark Rutland #elif defined(arch_atomic_fetch_dec)
13861d78814dSMark Rutland 	return arch_atomic_fetch_dec(v);
13879257959aSMark Rutland #else
13889257959aSMark Rutland 	return raw_atomic_fetch_sub_release(1, v);
1389e3d18ceeSMark Rutland #endif
13901d78814dSMark Rutland }
1391e3d18ceeSMark Rutland 
1392ad811070SMark Rutland /**
1393ad811070SMark Rutland  * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
1394ad811070SMark Rutland  * @v: pointer to atomic_t
1395ad811070SMark Rutland  *
1396ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with relaxed ordering.
1397ad811070SMark Rutland  *
1398ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
1399ad811070SMark Rutland  *
1400ad811070SMark Rutland  * Return: The original value of @v.
1401ad811070SMark Rutland  */
1402e3d18ceeSMark Rutland static __always_inline int
14039257959aSMark Rutland raw_atomic_fetch_dec_relaxed(atomic_t *v)
14049257959aSMark Rutland {
14051d78814dSMark Rutland #if defined(arch_atomic_fetch_dec_relaxed)
14061d78814dSMark Rutland 	return arch_atomic_fetch_dec_relaxed(v);
14071d78814dSMark Rutland #elif defined(arch_atomic_fetch_dec)
14081d78814dSMark Rutland 	return arch_atomic_fetch_dec(v);
14091d78814dSMark Rutland #else
14109257959aSMark Rutland 	return raw_atomic_fetch_sub_relaxed(1, v);
14119257959aSMark Rutland #endif
14121d78814dSMark Rutland }
14139257959aSMark Rutland 
1414ad811070SMark Rutland /**
1415ad811070SMark Rutland  * raw_atomic_and() - atomic bitwise AND with relaxed ordering
1416ad811070SMark Rutland  * @i: int value
1417ad811070SMark Rutland  * @v: pointer to atomic_t
1418ad811070SMark Rutland  *
1419ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with relaxed ordering.
1420ad811070SMark Rutland  *
1421ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_and() elsewhere.
1422ad811070SMark Rutland  *
1423ad811070SMark Rutland  * Return: Nothing.
1424ad811070SMark Rutland  */
14251d78814dSMark Rutland static __always_inline void
14261d78814dSMark Rutland raw_atomic_and(int i, atomic_t *v)
14271d78814dSMark Rutland {
14281d78814dSMark Rutland 	arch_atomic_and(i, v);
14291d78814dSMark Rutland }
14309257959aSMark Rutland 
1431ad811070SMark Rutland /**
1432ad811070SMark Rutland  * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
1433ad811070SMark Rutland  * @i: int value
1434ad811070SMark Rutland  * @v: pointer to atomic_t
1435ad811070SMark Rutland  *
1436ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with full ordering.
1437ad811070SMark Rutland  *
1438ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
1439ad811070SMark Rutland  *
1440ad811070SMark Rutland  * Return: The original value of @v.
1441ad811070SMark Rutland  */
14429257959aSMark Rutland static __always_inline int
14439257959aSMark Rutland raw_atomic_fetch_and(int i, atomic_t *v)
1444e3d18ceeSMark Rutland {
14451d78814dSMark Rutland #if defined(arch_atomic_fetch_and)
14461d78814dSMark Rutland 	return arch_atomic_fetch_and(i, v);
14471d78814dSMark Rutland #elif defined(arch_atomic_fetch_and_relaxed)
1448e3d18ceeSMark Rutland 	int ret;
1449e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1450e3d18ceeSMark Rutland 	ret = arch_atomic_fetch_and_relaxed(i, v);
1451e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1452e3d18ceeSMark Rutland 	return ret;
14539257959aSMark Rutland #else
14549257959aSMark Rutland #error "Unable to define raw_atomic_fetch_and"
1455e3d18ceeSMark Rutland #endif
14561d78814dSMark Rutland }
1457e3d18ceeSMark Rutland 
1458ad811070SMark Rutland /**
1459ad811070SMark Rutland  * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
1460ad811070SMark Rutland  * @i: int value
1461ad811070SMark Rutland  * @v: pointer to atomic_t
1462ad811070SMark Rutland  *
1463ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with acquire ordering.
1464ad811070SMark Rutland  *
1465ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
1466ad811070SMark Rutland  *
1467ad811070SMark Rutland  * Return: The original value of @v.
1468ad811070SMark Rutland  */
1469e3d18ceeSMark Rutland static __always_inline int
14709257959aSMark Rutland raw_atomic_fetch_and_acquire(int i, atomic_t *v)
1471e3d18ceeSMark Rutland {
14721d78814dSMark Rutland #if defined(arch_atomic_fetch_and_acquire)
14731d78814dSMark Rutland 	return arch_atomic_fetch_and_acquire(i, v);
14741d78814dSMark Rutland #elif defined(arch_atomic_fetch_and_relaxed)
14759257959aSMark Rutland 	int ret = arch_atomic_fetch_and_relaxed(i, v);
1476e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1477e3d18ceeSMark Rutland 	return ret;
14789257959aSMark Rutland #elif defined(arch_atomic_fetch_and)
14791d78814dSMark Rutland 	return arch_atomic_fetch_and(i, v);
14809257959aSMark Rutland #else
14819257959aSMark Rutland #error "Unable to define raw_atomic_fetch_and_acquire"
1482e3d18ceeSMark Rutland #endif
14831d78814dSMark Rutland }
1484e3d18ceeSMark Rutland 
1485ad811070SMark Rutland /**
1486ad811070SMark Rutland  * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
1487ad811070SMark Rutland  * @i: int value
1488ad811070SMark Rutland  * @v: pointer to atomic_t
1489ad811070SMark Rutland  *
1490ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with release ordering.
1491ad811070SMark Rutland  *
1492ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
1493ad811070SMark Rutland  *
1494ad811070SMark Rutland  * Return: The original value of @v.
1495ad811070SMark Rutland  */
1496e3d18ceeSMark Rutland static __always_inline int
14979257959aSMark Rutland raw_atomic_fetch_and_release(int i, atomic_t *v)
1498e3d18ceeSMark Rutland {
14991d78814dSMark Rutland #if defined(arch_atomic_fetch_and_release)
15001d78814dSMark Rutland 	return arch_atomic_fetch_and_release(i, v);
15011d78814dSMark Rutland #elif defined(arch_atomic_fetch_and_relaxed)
1502e3d18ceeSMark Rutland 	__atomic_release_fence();
15039257959aSMark Rutland 	return arch_atomic_fetch_and_relaxed(i, v);
15049257959aSMark Rutland #elif defined(arch_atomic_fetch_and)
15051d78814dSMark Rutland 	return arch_atomic_fetch_and(i, v);
15069257959aSMark Rutland #else
15079257959aSMark Rutland #error "Unable to define raw_atomic_fetch_and_release"
1508e3d18ceeSMark Rutland #endif
15091d78814dSMark Rutland }
1510e3d18ceeSMark Rutland 
1511ad811070SMark Rutland /**
1512ad811070SMark Rutland  * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
1513ad811070SMark Rutland  * @i: int value
1514ad811070SMark Rutland  * @v: pointer to atomic_t
1515ad811070SMark Rutland  *
1516ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with relaxed ordering.
1517ad811070SMark Rutland  *
1518ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
1519ad811070SMark Rutland  *
1520ad811070SMark Rutland  * Return: The original value of @v.
1521ad811070SMark Rutland  */
15221d78814dSMark Rutland static __always_inline int
15231d78814dSMark Rutland raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
15241d78814dSMark Rutland {
15259257959aSMark Rutland #if defined(arch_atomic_fetch_and_relaxed)
15261d78814dSMark Rutland 	return arch_atomic_fetch_and_relaxed(i, v);
15279257959aSMark Rutland #elif defined(arch_atomic_fetch_and)
15281d78814dSMark Rutland 	return arch_atomic_fetch_and(i, v);
15299257959aSMark Rutland #else
15309257959aSMark Rutland #error "Unable to define raw_atomic_fetch_and_relaxed"
15319257959aSMark Rutland #endif
15321d78814dSMark Rutland }
15339257959aSMark Rutland 
1534ad811070SMark Rutland /**
1535ad811070SMark Rutland  * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
1536ad811070SMark Rutland  * @i: int value
1537ad811070SMark Rutland  * @v: pointer to atomic_t
1538ad811070SMark Rutland  *
1539ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1540ad811070SMark Rutland  *
1541ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
1542ad811070SMark Rutland  *
1543ad811070SMark Rutland  * Return: Nothing.
1544ad811070SMark Rutland  */
15459257959aSMark Rutland static __always_inline void
15469257959aSMark Rutland raw_atomic_andnot(int i, atomic_t *v)
15479257959aSMark Rutland {
15481d78814dSMark Rutland #if defined(arch_atomic_andnot)
15491d78814dSMark Rutland 	arch_atomic_andnot(i, v);
15501d78814dSMark Rutland #else
15519257959aSMark Rutland 	raw_atomic_and(~i, v);
15529257959aSMark Rutland #endif
15531d78814dSMark Rutland }
15549257959aSMark Rutland 
1555ad811070SMark Rutland /**
1556ad811070SMark Rutland  * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
1557ad811070SMark Rutland  * @i: int value
1558ad811070SMark Rutland  * @v: pointer to atomic_t
1559ad811070SMark Rutland  *
1560ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with full ordering.
1561ad811070SMark Rutland  *
1562ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
1563ad811070SMark Rutland  *
1564ad811070SMark Rutland  * Return: The original value of @v.
1565ad811070SMark Rutland  */
1566e3d18ceeSMark Rutland static __always_inline int
15679257959aSMark Rutland raw_atomic_fetch_andnot(int i, atomic_t *v)
1568e3d18ceeSMark Rutland {
15691d78814dSMark Rutland #if defined(arch_atomic_fetch_andnot)
15701d78814dSMark Rutland 	return arch_atomic_fetch_andnot(i, v);
15711d78814dSMark Rutland #elif defined(arch_atomic_fetch_andnot_relaxed)
1572e3d18ceeSMark Rutland 	int ret;
1573e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1574e3d18ceeSMark Rutland 	ret = arch_atomic_fetch_andnot_relaxed(i, v);
1575e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1576e3d18ceeSMark Rutland 	return ret;
15779257959aSMark Rutland #else
15789257959aSMark Rutland 	return raw_atomic_fetch_and(~i, v);
1579e3d18ceeSMark Rutland #endif
15801d78814dSMark Rutland }
1581e3d18ceeSMark Rutland 
1582ad811070SMark Rutland /**
1583ad811070SMark Rutland  * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
1584ad811070SMark Rutland  * @i: int value
1585ad811070SMark Rutland  * @v: pointer to atomic_t
1586ad811070SMark Rutland  *
1587ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with acquire ordering.
1588ad811070SMark Rutland  *
1589ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
1590ad811070SMark Rutland  *
1591ad811070SMark Rutland  * Return: The original value of @v.
1592ad811070SMark Rutland  */
1593e3d18ceeSMark Rutland static __always_inline int
15949257959aSMark Rutland raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1595e3d18ceeSMark Rutland {
15961d78814dSMark Rutland #if defined(arch_atomic_fetch_andnot_acquire)
15971d78814dSMark Rutland 	return arch_atomic_fetch_andnot_acquire(i, v);
15981d78814dSMark Rutland #elif defined(arch_atomic_fetch_andnot_relaxed)
15999257959aSMark Rutland 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
1600e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1601e3d18ceeSMark Rutland 	return ret;
16029257959aSMark Rutland #elif defined(arch_atomic_fetch_andnot)
16031d78814dSMark Rutland 	return arch_atomic_fetch_andnot(i, v);
16049257959aSMark Rutland #else
16059257959aSMark Rutland 	return raw_atomic_fetch_and_acquire(~i, v);
1606e3d18ceeSMark Rutland #endif
16071d78814dSMark Rutland }
1608e3d18ceeSMark Rutland 
1609ad811070SMark Rutland /**
1610ad811070SMark Rutland  * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
1611ad811070SMark Rutland  * @i: int value
1612ad811070SMark Rutland  * @v: pointer to atomic_t
1613ad811070SMark Rutland  *
1614ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with release ordering.
1615ad811070SMark Rutland  *
1616ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
1617ad811070SMark Rutland  *
1618ad811070SMark Rutland  * Return: The original value of @v.
1619ad811070SMark Rutland  */
1620e3d18ceeSMark Rutland static __always_inline int
16219257959aSMark Rutland raw_atomic_fetch_andnot_release(int i, atomic_t *v)
1622e3d18ceeSMark Rutland {
16231d78814dSMark Rutland #if defined(arch_atomic_fetch_andnot_release)
16241d78814dSMark Rutland 	return arch_atomic_fetch_andnot_release(i, v);
16251d78814dSMark Rutland #elif defined(arch_atomic_fetch_andnot_relaxed)
1626e3d18ceeSMark Rutland 	__atomic_release_fence();
16279257959aSMark Rutland 	return arch_atomic_fetch_andnot_relaxed(i, v);
16289257959aSMark Rutland #elif defined(arch_atomic_fetch_andnot)
16291d78814dSMark Rutland 	return arch_atomic_fetch_andnot(i, v);
16309257959aSMark Rutland #else
16319257959aSMark Rutland 	return raw_atomic_fetch_and_release(~i, v);
1632e3d18ceeSMark Rutland #endif
16331d78814dSMark Rutland }
1634e3d18ceeSMark Rutland 
1635ad811070SMark Rutland /**
1636ad811070SMark Rutland  * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
1637ad811070SMark Rutland  * @i: int value
1638ad811070SMark Rutland  * @v: pointer to atomic_t
1639ad811070SMark Rutland  *
1640ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1641ad811070SMark Rutland  *
1642ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
1643ad811070SMark Rutland  *
1644ad811070SMark Rutland  * Return: The original value of @v.
1645ad811070SMark Rutland  */
1646e3d18ceeSMark Rutland static __always_inline int
16479257959aSMark Rutland raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
16489257959aSMark Rutland {
16491d78814dSMark Rutland #if defined(arch_atomic_fetch_andnot_relaxed)
16501d78814dSMark Rutland 	return arch_atomic_fetch_andnot_relaxed(i, v);
16511d78814dSMark Rutland #elif defined(arch_atomic_fetch_andnot)
16521d78814dSMark Rutland 	return arch_atomic_fetch_andnot(i, v);
16531d78814dSMark Rutland #else
16549257959aSMark Rutland 	return raw_atomic_fetch_and_relaxed(~i, v);
16559257959aSMark Rutland #endif
16561d78814dSMark Rutland }
16579257959aSMark Rutland 
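/*
 * Illustrative sketch, not part of the generated API: using
 * raw_atomic_fetch_andnot() to clear a flag bit while learning whether it
 * was previously set. Both the helper name and the @flag parameter are
 * hypothetical.
 */
static __always_inline bool
raw_atomic_example_test_and_clear_flag(atomic_t *flags, int flag)
{
	/* The original value tells us whether @flag was set before we cleared it. */
	return raw_atomic_fetch_andnot(flag, flags) & flag;
}
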
1658ad811070SMark Rutland /**
1659ad811070SMark Rutland  * raw_atomic_or() - atomic bitwise OR with relaxed ordering
1660ad811070SMark Rutland  * @i: int value
1661ad811070SMark Rutland  * @v: pointer to atomic_t
1662ad811070SMark Rutland  *
1663ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with relaxed ordering.
1664ad811070SMark Rutland  *
1665ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_or() elsewhere.
1666ad811070SMark Rutland  *
1667ad811070SMark Rutland  * Return: Nothing.
1668ad811070SMark Rutland  */
16691d78814dSMark Rutland static __always_inline void
16701d78814dSMark Rutland raw_atomic_or(int i, atomic_t *v)
16711d78814dSMark Rutland {
16721d78814dSMark Rutland 	arch_atomic_or(i, v);
16731d78814dSMark Rutland }
16749257959aSMark Rutland 
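/*
 * Illustrative sketch, not part of the generated API: setting a flag bit
 * with raw_atomic_or(); relaxed ordering is sufficient when the flag is
 * only consulted under some stronger synchronisation elsewhere. The helper
 * name and @flag parameter are hypothetical.
 */
static __always_inline void
raw_atomic_example_set_flag(atomic_t *flags, int flag)
{
	raw_atomic_or(flag, flags);
}
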
1675ad811070SMark Rutland /**
1676ad811070SMark Rutland  * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
1677ad811070SMark Rutland  * @i: int value
1678ad811070SMark Rutland  * @v: pointer to atomic_t
1679ad811070SMark Rutland  *
1680ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with full ordering.
1681ad811070SMark Rutland  *
1682ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
1683ad811070SMark Rutland  *
1684ad811070SMark Rutland  * Return: The original value of @v.
1685ad811070SMark Rutland  */
16869257959aSMark Rutland static __always_inline int
16879257959aSMark Rutland raw_atomic_fetch_or(int i, atomic_t *v)
1688e3d18ceeSMark Rutland {
16891d78814dSMark Rutland #if defined(arch_atomic_fetch_or)
16901d78814dSMark Rutland 	return arch_atomic_fetch_or(i, v);
16911d78814dSMark Rutland #elif defined(arch_atomic_fetch_or_relaxed)
1692e3d18ceeSMark Rutland 	int ret;
1693e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1694e3d18ceeSMark Rutland 	ret = arch_atomic_fetch_or_relaxed(i, v);
1695e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1696e3d18ceeSMark Rutland 	return ret;
16979257959aSMark Rutland #else
16989257959aSMark Rutland #error "Unable to define raw_atomic_fetch_or"
1699e3d18ceeSMark Rutland #endif
17001d78814dSMark Rutland }
1701e3d18ceeSMark Rutland 
1702ad811070SMark Rutland /**
1703ad811070SMark Rutland  * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
1704ad811070SMark Rutland  * @i: int value
1705ad811070SMark Rutland  * @v: pointer to atomic_t
1706ad811070SMark Rutland  *
1707ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with acquire ordering.
1708ad811070SMark Rutland  *
1709ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
1710ad811070SMark Rutland  *
1711ad811070SMark Rutland  * Return: The original value of @v.
1712ad811070SMark Rutland  */
1713e3d18ceeSMark Rutland static __always_inline int
17149257959aSMark Rutland raw_atomic_fetch_or_acquire(int i, atomic_t *v)
1715e3d18ceeSMark Rutland {
17161d78814dSMark Rutland #if defined(arch_atomic_fetch_or_acquire)
17171d78814dSMark Rutland 	return arch_atomic_fetch_or_acquire(i, v);
17181d78814dSMark Rutland #elif defined(arch_atomic_fetch_or_relaxed)
17199257959aSMark Rutland 	int ret = arch_atomic_fetch_or_relaxed(i, v);
1720e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1721e3d18ceeSMark Rutland 	return ret;
17229257959aSMark Rutland #elif defined(arch_atomic_fetch_or)
17231d78814dSMark Rutland 	return arch_atomic_fetch_or(i, v);
17249257959aSMark Rutland #else
17259257959aSMark Rutland #error "Unable to define raw_atomic_fetch_or_acquire"
1726e3d18ceeSMark Rutland #endif
17271d78814dSMark Rutland }
1728e3d18ceeSMark Rutland 
1729ad811070SMark Rutland /**
1730ad811070SMark Rutland  * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
1731ad811070SMark Rutland  * @i: int value
1732ad811070SMark Rutland  * @v: pointer to atomic_t
1733ad811070SMark Rutland  *
1734ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with release ordering.
1735ad811070SMark Rutland  *
1736ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
1737ad811070SMark Rutland  *
1738ad811070SMark Rutland  * Return: The original value of @v.
1739ad811070SMark Rutland  */
1740e3d18ceeSMark Rutland static __always_inline int
17419257959aSMark Rutland raw_atomic_fetch_or_release(int i, atomic_t *v)
1742e3d18ceeSMark Rutland {
17431d78814dSMark Rutland #if defined(arch_atomic_fetch_or_release)
17441d78814dSMark Rutland 	return arch_atomic_fetch_or_release(i, v);
17451d78814dSMark Rutland #elif defined(arch_atomic_fetch_or_relaxed)
1746e3d18ceeSMark Rutland 	__atomic_release_fence();
17479257959aSMark Rutland 	return arch_atomic_fetch_or_relaxed(i, v);
17489257959aSMark Rutland #elif defined(arch_atomic_fetch_or)
17491d78814dSMark Rutland 	return arch_atomic_fetch_or(i, v);
17509257959aSMark Rutland #else
17519257959aSMark Rutland #error "Unable to define raw_atomic_fetch_or_release"
1752e3d18ceeSMark Rutland #endif
17531d78814dSMark Rutland }
1754e3d18ceeSMark Rutland 
1755ad811070SMark Rutland /**
1756ad811070SMark Rutland  * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
1757ad811070SMark Rutland  * @i: int value
1758ad811070SMark Rutland  * @v: pointer to atomic_t
1759ad811070SMark Rutland  *
1760ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with relaxed ordering.
1761ad811070SMark Rutland  *
1762ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
1763ad811070SMark Rutland  *
1764ad811070SMark Rutland  * Return: The original value of @v.
1765ad811070SMark Rutland  */
17661d78814dSMark Rutland static __always_inline int
17671d78814dSMark Rutland raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
17681d78814dSMark Rutland {
17699257959aSMark Rutland #if defined(arch_atomic_fetch_or_relaxed)
17701d78814dSMark Rutland 	return arch_atomic_fetch_or_relaxed(i, v);
17719257959aSMark Rutland #elif defined(arch_atomic_fetch_or)
17721d78814dSMark Rutland 	return arch_atomic_fetch_or(i, v);
17739257959aSMark Rutland #else
17749257959aSMark Rutland #error "Unable to define raw_atomic_fetch_or_relaxed"
17759257959aSMark Rutland #endif
17761d78814dSMark Rutland }
17779257959aSMark Rutland 
1778ad811070SMark Rutland /**
1779ad811070SMark Rutland  * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
1780ad811070SMark Rutland  * @i: int value
1781ad811070SMark Rutland  * @v: pointer to atomic_t
1782ad811070SMark Rutland  *
1783ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1784ad811070SMark Rutland  *
1785ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
1786ad811070SMark Rutland  *
1787ad811070SMark Rutland  * Return: Nothing.
1788ad811070SMark Rutland  */
17891d78814dSMark Rutland static __always_inline void
17901d78814dSMark Rutland raw_atomic_xor(int i, atomic_t *v)
17911d78814dSMark Rutland {
17921d78814dSMark Rutland 	arch_atomic_xor(i, v);
17931d78814dSMark Rutland }
17949257959aSMark Rutland 
1795ad811070SMark Rutland /**
1796ad811070SMark Rutland  * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
1797ad811070SMark Rutland  * @i: int value
1798ad811070SMark Rutland  * @v: pointer to atomic_t
1799ad811070SMark Rutland  *
1800ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with full ordering.
1801ad811070SMark Rutland  *
1802ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
1803ad811070SMark Rutland  *
1804ad811070SMark Rutland  * Return: The original value of @v.
1805ad811070SMark Rutland  */
1806e3d18ceeSMark Rutland static __always_inline int
18079257959aSMark Rutland raw_atomic_fetch_xor(int i, atomic_t *v)
1808e3d18ceeSMark Rutland {
18091d78814dSMark Rutland #if defined(arch_atomic_fetch_xor)
18101d78814dSMark Rutland 	return arch_atomic_fetch_xor(i, v);
18111d78814dSMark Rutland #elif defined(arch_atomic_fetch_xor_relaxed)
1812e3d18ceeSMark Rutland 	int ret;
1813e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
1814e3d18ceeSMark Rutland 	ret = arch_atomic_fetch_xor_relaxed(i, v);
1815e3d18ceeSMark Rutland 	__atomic_post_full_fence();
1816e3d18ceeSMark Rutland 	return ret;
18179257959aSMark Rutland #else
18189257959aSMark Rutland #error "Unable to define raw_atomic_fetch_xor"
1819e3d18ceeSMark Rutland #endif
18201d78814dSMark Rutland }
1821e3d18ceeSMark Rutland 
1822ad811070SMark Rutland /**
1823ad811070SMark Rutland  * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
1824ad811070SMark Rutland  * @i: int value
1825ad811070SMark Rutland  * @v: pointer to atomic_t
1826ad811070SMark Rutland  *
1827ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with acquire ordering.
1828ad811070SMark Rutland  *
1829ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
1830ad811070SMark Rutland  *
1831ad811070SMark Rutland  * Return: The original value of @v.
1832ad811070SMark Rutland  */
1833d12157efSMark Rutland static __always_inline int
18349257959aSMark Rutland raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
1835d12157efSMark Rutland {
18361d78814dSMark Rutland #if defined(arch_atomic_fetch_xor_acquire)
18371d78814dSMark Rutland 	return arch_atomic_fetch_xor_acquire(i, v);
18381d78814dSMark Rutland #elif defined(arch_atomic_fetch_xor_relaxed)
18399257959aSMark Rutland 	int ret = arch_atomic_fetch_xor_relaxed(i, v);
1840e3d18ceeSMark Rutland 	__atomic_acquire_fence();
1841e3d18ceeSMark Rutland 	return ret;
18429257959aSMark Rutland #elif defined(arch_atomic_fetch_xor)
18431d78814dSMark Rutland 	return arch_atomic_fetch_xor(i, v);
18449257959aSMark Rutland #else
18459257959aSMark Rutland #error "Unable to define raw_atomic_fetch_xor_acquire"
1846e3d18ceeSMark Rutland #endif
18471d78814dSMark Rutland }
1848e3d18ceeSMark Rutland 
1849ad811070SMark Rutland /**
1850ad811070SMark Rutland  * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
1851ad811070SMark Rutland  * @i: int value
1852ad811070SMark Rutland  * @v: pointer to atomic_t
1853ad811070SMark Rutland  *
1854ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with release ordering.
1855ad811070SMark Rutland  *
1856ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
1857ad811070SMark Rutland  *
1858ad811070SMark Rutland  * Return: The original value of @v.
1859ad811070SMark Rutland  */
1860e3d18ceeSMark Rutland static __always_inline int
18619257959aSMark Rutland raw_atomic_fetch_xor_release(int i, atomic_t *v)
1862e3d18ceeSMark Rutland {
18631d78814dSMark Rutland #if defined(arch_atomic_fetch_xor_release)
18641d78814dSMark Rutland 	return arch_atomic_fetch_xor_release(i, v);
18651d78814dSMark Rutland #elif defined(arch_atomic_fetch_xor_relaxed)
1866e3d18ceeSMark Rutland 	__atomic_release_fence();
18679257959aSMark Rutland 	return arch_atomic_fetch_xor_relaxed(i, v);
18689257959aSMark Rutland #elif defined(arch_atomic_fetch_xor)
18691d78814dSMark Rutland 	return arch_atomic_fetch_xor(i, v);
18709257959aSMark Rutland #else
18719257959aSMark Rutland #error "Unable to define raw_atomic_fetch_xor_release"
1872e3d18ceeSMark Rutland #endif
18731d78814dSMark Rutland }
1874e3d18ceeSMark Rutland 
1875ad811070SMark Rutland /**
1876ad811070SMark Rutland  * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
1877ad811070SMark Rutland  * @i: int value
1878ad811070SMark Rutland  * @v: pointer to atomic_t
1879ad811070SMark Rutland  *
1880ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1881ad811070SMark Rutland  *
1882ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
1883ad811070SMark Rutland  *
1884ad811070SMark Rutland  * Return: The original value of @v.
1885ad811070SMark Rutland  */
18861d78814dSMark Rutland static __always_inline int
18871d78814dSMark Rutland raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
18881d78814dSMark Rutland {
18899257959aSMark Rutland #if defined(arch_atomic_fetch_xor_relaxed)
18901d78814dSMark Rutland 	return arch_atomic_fetch_xor_relaxed(i, v);
18919257959aSMark Rutland #elif defined(arch_atomic_fetch_xor)
18921d78814dSMark Rutland 	return arch_atomic_fetch_xor(i, v);
18939257959aSMark Rutland #else
18949257959aSMark Rutland #error "Unable to define raw_atomic_fetch_xor_relaxed"
18959257959aSMark Rutland #endif
1896e3d18ceeSMark Rutland }
18971d78814dSMark Rutland 
1898ad811070SMark Rutland /**
1899ad811070SMark Rutland  * raw_atomic_xchg() - atomic exchange with full ordering
1900ad811070SMark Rutland  * @v: pointer to atomic_t
1901ad811070SMark Rutland  * @new: int value to assign
1902ad811070SMark Rutland  *
1903ad811070SMark Rutland  * Atomically updates @v to @new with full ordering.
1904ad811070SMark Rutland  *
1905ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
1906ad811070SMark Rutland  *
1907ad811070SMark Rutland  * Return: The original value of @v.
1908ad811070SMark Rutland  */
1909d12157efSMark Rutland static __always_inline int
19109257959aSMark Rutland raw_atomic_xchg(atomic_t *v, int new)
1911d12157efSMark Rutland {
19121d78814dSMark Rutland #if defined(arch_atomic_xchg)
19131d78814dSMark Rutland 	return arch_atomic_xchg(v, new);
19149257959aSMark Rutland #elif defined(arch_atomic_xchg_relaxed)
19151d78814dSMark Rutland 	int ret;
19161d78814dSMark Rutland 	__atomic_pre_full_fence();
19171d78814dSMark Rutland 	ret = arch_atomic_xchg_relaxed(v, new);
19181d78814dSMark Rutland 	__atomic_post_full_fence();
1919e3d18ceeSMark Rutland 	return ret;
19209257959aSMark Rutland #else
19211d78814dSMark Rutland 	return raw_xchg(&v->counter, new);
19221d78814dSMark Rutland #endif
19231d78814dSMark Rutland }
19241d78814dSMark Rutland 
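/*
 * Illustrative sketch, not part of the generated API: raw_atomic_xchg()
 * atomically hands ownership of the stored value to the caller, e.g. when
 * consuming a pending event word and resetting it to zero. The helper name
 * is hypothetical.
 */
static __always_inline int
raw_atomic_example_take_pending(atomic_t *pending)
{
	/* Full ordering ensures the caller sees writes made before the value was posted. */
	return raw_atomic_xchg(pending, 0);
}
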
1925ad811070SMark Rutland /**
1926ad811070SMark Rutland  * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
1927ad811070SMark Rutland  * @v: pointer to atomic_t
1928ad811070SMark Rutland  * @new: int value to assign
1929ad811070SMark Rutland  *
1930ad811070SMark Rutland  * Atomically updates @v to @new with acquire ordering.
1931ad811070SMark Rutland  *
1932ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
1933ad811070SMark Rutland  *
1934ad811070SMark Rutland  * Return: The original value of @v.
1935ad811070SMark Rutland  */
19369257959aSMark Rutland static __always_inline int
19379257959aSMark Rutland raw_atomic_xchg_acquire(atomic_t *v, int new)
19389257959aSMark Rutland {
19391d78814dSMark Rutland #if defined(arch_atomic_xchg_acquire)
19401d78814dSMark Rutland 	return arch_atomic_xchg_acquire(v, new);
19419257959aSMark Rutland #elif defined(arch_atomic_xchg_relaxed)
19421d78814dSMark Rutland 	int ret = arch_atomic_xchg_relaxed(v, new);
19431d78814dSMark Rutland 	__atomic_acquire_fence();
19441d78814dSMark Rutland 	return ret;
19459257959aSMark Rutland #elif defined(arch_atomic_xchg)
19461d78814dSMark Rutland 	return arch_atomic_xchg(v, new);
19479257959aSMark Rutland #else
19481d78814dSMark Rutland 	return raw_xchg_acquire(&v->counter, new);
19491d78814dSMark Rutland #endif
19501d78814dSMark Rutland }
19511d78814dSMark Rutland 
1952ad811070SMark Rutland /**
1953ad811070SMark Rutland  * raw_atomic_xchg_release() - atomic exchange with release ordering
1954ad811070SMark Rutland  * @v: pointer to atomic_t
1955ad811070SMark Rutland  * @new: int value to assign
1956ad811070SMark Rutland  *
1957ad811070SMark Rutland  * Atomically updates @v to @new with release ordering.
1958ad811070SMark Rutland  *
1959ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
1960ad811070SMark Rutland  *
1961ad811070SMark Rutland  * Return: The original value of @v.
1962ad811070SMark Rutland  */
19639257959aSMark Rutland static __always_inline int
19649257959aSMark Rutland raw_atomic_xchg_release(atomic_t *v, int new)
19659257959aSMark Rutland {
19661d78814dSMark Rutland #if defined(arch_atomic_xchg_release)
19671d78814dSMark Rutland 	return arch_atomic_xchg_release(v, new);
19681d78814dSMark Rutland #elif defined(arch_atomic_xchg_relaxed)
19691d78814dSMark Rutland 	__atomic_release_fence();
19701d78814dSMark Rutland 	return arch_atomic_xchg_relaxed(v, new);
19719257959aSMark Rutland #elif defined(arch_atomic_xchg)
19721d78814dSMark Rutland 	return arch_atomic_xchg(v, new);
19739257959aSMark Rutland #else
19741d78814dSMark Rutland 	return raw_xchg_release(&v->counter, new);
19751d78814dSMark Rutland #endif
19761d78814dSMark Rutland }
19771d78814dSMark Rutland 
1978ad811070SMark Rutland /**
1979ad811070SMark Rutland  * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
1980ad811070SMark Rutland  * @v: pointer to atomic_t
1981ad811070SMark Rutland  * @new: int value to assign
1982ad811070SMark Rutland  *
1983ad811070SMark Rutland  * Atomically updates @v to @new with relaxed ordering.
1984ad811070SMark Rutland  *
1985ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
1986ad811070SMark Rutland  *
1987ad811070SMark Rutland  * Return: The original value of @v.
1988ad811070SMark Rutland  */
1989e3d18ceeSMark Rutland static __always_inline int
19909257959aSMark Rutland raw_atomic_xchg_relaxed(atomic_t *v, int new)
19919257959aSMark Rutland {
19921d78814dSMark Rutland #if defined(arch_atomic_xchg_relaxed)
19931d78814dSMark Rutland 	return arch_atomic_xchg_relaxed(v, new);
19941d78814dSMark Rutland #elif defined(arch_atomic_xchg)
19951d78814dSMark Rutland 	return arch_atomic_xchg(v, new);
19961d78814dSMark Rutland #else
19979257959aSMark Rutland 	return raw_xchg_relaxed(&v->counter, new);
19989257959aSMark Rutland #endif
19991d78814dSMark Rutland }
20009257959aSMark Rutland 
2001ad811070SMark Rutland /**
2002ad811070SMark Rutland  * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
2003ad811070SMark Rutland  * @v: pointer to atomic_t
2004ad811070SMark Rutland  * @old: int value to compare with
2005ad811070SMark Rutland  * @new: int value to assign
2006ad811070SMark Rutland  *
2007ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with full ordering.
20086dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2009ad811070SMark Rutland  *
2010ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
2011ad811070SMark Rutland  *
2012ad811070SMark Rutland  * Return: The original value of @v.
2013ad811070SMark Rutland  */
20149257959aSMark Rutland static __always_inline int
20159257959aSMark Rutland raw_atomic_cmpxchg(atomic_t *v, int old, int new)
2016e3d18ceeSMark Rutland {
20171d78814dSMark Rutland #if defined(arch_atomic_cmpxchg)
20181d78814dSMark Rutland 	return arch_atomic_cmpxchg(v, old, new);
20191d78814dSMark Rutland #elif defined(arch_atomic_cmpxchg_relaxed)
2020e3d18ceeSMark Rutland 	int ret;
2021e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
2022e3d18ceeSMark Rutland 	ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2023e3d18ceeSMark Rutland 	__atomic_post_full_fence();
2024e3d18ceeSMark Rutland 	return ret;
20259257959aSMark Rutland #else
20269257959aSMark Rutland 	return raw_cmpxchg(&v->counter, old, new);
2027e3d18ceeSMark Rutland #endif
20281d78814dSMark Rutland }
2029e3d18ceeSMark Rutland 
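/*
 * Illustrative sketch, not part of the generated API: the classic cmpxchg()
 * retry loop, incrementing @v only while it is non-negative. The
 * try_cmpxchg() form further below expresses the same idiom more compactly.
 * The helper name is hypothetical; raw_atomic_read() is defined earlier in
 * this file.
 */
static __always_inline bool
raw_atomic_example_inc_unless_negative_cmpxchg(atomic_t *v)
{
	int old = raw_atomic_read(v);

	for (;;) {
		int tmp;

		if (old < 0)
			return false;
		tmp = raw_atomic_cmpxchg(v, old, old + 1);
		if (tmp == old)
			return true;
		/* Lost the race; retry with the value we actually observed. */
		old = tmp;
	}
}
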
2030ad811070SMark Rutland /**
2031ad811070SMark Rutland  * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2032ad811070SMark Rutland  * @v: pointer to atomic_t
2033ad811070SMark Rutland  * @old: int value to compare with
2034ad811070SMark Rutland  * @new: int value to assign
2035ad811070SMark Rutland  *
2036ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with acquire ordering.
20376dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2038ad811070SMark Rutland  *
2039ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
2040ad811070SMark Rutland  *
2041ad811070SMark Rutland  * Return: The original value of @v.
2042ad811070SMark Rutland  */
20439257959aSMark Rutland static __always_inline int
20449257959aSMark Rutland raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2045e3d18ceeSMark Rutland {
20461d78814dSMark Rutland #if defined(arch_atomic_cmpxchg_acquire)
20471d78814dSMark Rutland 	return arch_atomic_cmpxchg_acquire(v, old, new);
20481d78814dSMark Rutland #elif defined(arch_atomic_cmpxchg_relaxed)
20499257959aSMark Rutland 	int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2050e3d18ceeSMark Rutland 	__atomic_acquire_fence();
2051e3d18ceeSMark Rutland 	return ret;
20529257959aSMark Rutland #elif defined(arch_atomic_cmpxchg)
20531d78814dSMark Rutland 	return arch_atomic_cmpxchg(v, old, new);
20549257959aSMark Rutland #else
20559257959aSMark Rutland 	return raw_cmpxchg_acquire(&v->counter, old, new);
2056e3d18ceeSMark Rutland #endif
20571d78814dSMark Rutland }
2058e3d18ceeSMark Rutland 
2059ad811070SMark Rutland /**
2060ad811070SMark Rutland  * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
2061ad811070SMark Rutland  * @v: pointer to atomic_t
2062ad811070SMark Rutland  * @old: int value to compare with
2063ad811070SMark Rutland  * @new: int value to assign
2064ad811070SMark Rutland  *
2065ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with release ordering.
20666dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2067ad811070SMark Rutland  *
2068ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
2069ad811070SMark Rutland  *
2070ad811070SMark Rutland  * Return: The original value of @v.
2071ad811070SMark Rutland  */
20729257959aSMark Rutland static __always_inline int
20739257959aSMark Rutland raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2074e3d18ceeSMark Rutland {
20751d78814dSMark Rutland #if defined(arch_atomic_cmpxchg_release)
20761d78814dSMark Rutland 	return arch_atomic_cmpxchg_release(v, old, new);
20771d78814dSMark Rutland #elif defined(arch_atomic_cmpxchg_relaxed)
2078e3d18ceeSMark Rutland 	__atomic_release_fence();
20799257959aSMark Rutland 	return arch_atomic_cmpxchg_relaxed(v, old, new);
20809257959aSMark Rutland #elif defined(arch_atomic_cmpxchg)
20811d78814dSMark Rutland 	return arch_atomic_cmpxchg(v, old, new);
20829257959aSMark Rutland #else
20839257959aSMark Rutland 	return raw_cmpxchg_release(&v->counter, old, new);
2084e3d18ceeSMark Rutland #endif
20851d78814dSMark Rutland }
2086e3d18ceeSMark Rutland 
2087ad811070SMark Rutland /**
2088ad811070SMark Rutland  * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2089ad811070SMark Rutland  * @v: pointer to atomic_t
2090ad811070SMark Rutland  * @old: int value to compare with
2091ad811070SMark Rutland  * @new: int value to assign
2092ad811070SMark Rutland  *
2093ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with relaxed ordering.
20946dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2095ad811070SMark Rutland  *
2096ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
2097ad811070SMark Rutland  *
2098ad811070SMark Rutland  * Return: The original value of @v.
2099ad811070SMark Rutland  */
21009257959aSMark Rutland static __always_inline int
21019257959aSMark Rutland raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
21029257959aSMark Rutland {
21031d78814dSMark Rutland #if defined(arch_atomic_cmpxchg_relaxed)
21041d78814dSMark Rutland 	return arch_atomic_cmpxchg_relaxed(v, old, new);
21051d78814dSMark Rutland #elif defined(arch_atomic_cmpxchg)
21061d78814dSMark Rutland 	return arch_atomic_cmpxchg(v, old, new);
21071d78814dSMark Rutland #else
21089257959aSMark Rutland 	return raw_cmpxchg_relaxed(&v->counter, old, new);
21099257959aSMark Rutland #endif
21101d78814dSMark Rutland }
21119257959aSMark Rutland 
2112ad811070SMark Rutland /**
2113ad811070SMark Rutland  * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
2114ad811070SMark Rutland  * @v: pointer to atomic_t
2115ad811070SMark Rutland  * @old: pointer to int value to compare with
2116ad811070SMark Rutland  * @new: int value to assign
2117ad811070SMark Rutland  *
2118ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with full ordering.
21196dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
21206dfee110SMark Rutland  * and relaxed ordering is provided.
2121ad811070SMark Rutland  *
2122ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
2123ad811070SMark Rutland  *
2124ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
2125ad811070SMark Rutland  */
2126e3d18ceeSMark Rutland static __always_inline bool
21279257959aSMark Rutland raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2128e3d18ceeSMark Rutland {
21291d78814dSMark Rutland #if defined(arch_atomic_try_cmpxchg)
21301d78814dSMark Rutland 	return arch_atomic_try_cmpxchg(v, old, new);
21311d78814dSMark Rutland #elif defined(arch_atomic_try_cmpxchg_relaxed)
2132e3d18ceeSMark Rutland 	bool ret;
2133e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
2134e3d18ceeSMark Rutland 	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2135e3d18ceeSMark Rutland 	__atomic_post_full_fence();
2136e3d18ceeSMark Rutland 	return ret;
21379257959aSMark Rutland #else
21389257959aSMark Rutland 	int r, o = *old;
21399257959aSMark Rutland 	r = raw_atomic_cmpxchg(v, o, new);
21409257959aSMark Rutland 	if (unlikely(r != o))
21419257959aSMark Rutland 		*old = r;
21429257959aSMark Rutland 	return likely(r == o);
2143e3d18ceeSMark Rutland #endif
21441d78814dSMark Rutland }
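
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * the try_cmpxchg() form is meant for retry loops, since @old is updated
 * in place on failure and does not need to be re-read. The helper name
 * example_set_max() below is hypothetical.
 *
 *	static inline void example_set_max(atomic_t *v, int new)
 *	{
 *		int seen = raw_atomic_read(v);
 *
 *		do {
 *			if (seen >= new)
 *				return;		// already at least @new
 *		} while (!raw_atomic_try_cmpxchg(v, &seen, new));
 *	}
 */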
2145e3d18ceeSMark Rutland 
2146ad811070SMark Rutland /**
2147ad811070SMark Rutland  * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2148ad811070SMark Rutland  * @v: pointer to atomic_t
2149ad811070SMark Rutland  * @old: pointer to int value to compare with
2150ad811070SMark Rutland  * @new: int value to assign
2151ad811070SMark Rutland  *
2152ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with acquire ordering.
21536dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
21546dfee110SMark Rutland  * and relaxed ordering is provided.
2155ad811070SMark Rutland  *
2156ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
2157ad811070SMark Rutland  *
2158ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
2159ad811070SMark Rutland  */
2160e3d18ceeSMark Rutland static __always_inline bool
21619257959aSMark Rutland raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2162e3d18ceeSMark Rutland {
21631d78814dSMark Rutland #if defined(arch_atomic_try_cmpxchg_acquire)
21641d78814dSMark Rutland 	return arch_atomic_try_cmpxchg_acquire(v, old, new);
21651d78814dSMark Rutland #elif defined(arch_atomic_try_cmpxchg_relaxed)
21669257959aSMark Rutland 	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2167e5ab9effSThomas Gleixner 	__atomic_acquire_fence();
2168e5ab9effSThomas Gleixner 	return ret;
21699257959aSMark Rutland #elif defined(arch_atomic_try_cmpxchg)
21701d78814dSMark Rutland 	return arch_atomic_try_cmpxchg(v, old, new);
21719257959aSMark Rutland #else
21729257959aSMark Rutland 	int r, o = *old;
21739257959aSMark Rutland 	r = raw_atomic_cmpxchg_acquire(v, o, new);
21749257959aSMark Rutland 	if (unlikely(r != o))
21759257959aSMark Rutland 		*old = r;
21769257959aSMark Rutland 	return likely(r == o);
2177e5ab9effSThomas Gleixner #endif
21781d78814dSMark Rutland }
2179e5ab9effSThomas Gleixner 
2180ad811070SMark Rutland /**
2181ad811070SMark Rutland  * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
2182ad811070SMark Rutland  * @v: pointer to atomic_t
2183ad811070SMark Rutland  * @old: pointer to int value to compare with
2184ad811070SMark Rutland  * @new: int value to assign
2185ad811070SMark Rutland  *
2186ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with release ordering.
21876dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
21886dfee110SMark Rutland  * and relaxed ordering is provided.
2189ad811070SMark Rutland  *
2190ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
2191ad811070SMark Rutland  *
2192ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
2193ad811070SMark Rutland  */
2194e5ab9effSThomas Gleixner static __always_inline bool
21959257959aSMark Rutland raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2196e5ab9effSThomas Gleixner {
21971d78814dSMark Rutland #if defined(arch_atomic_try_cmpxchg_release)
21981d78814dSMark Rutland 	return arch_atomic_try_cmpxchg_release(v, old, new);
21991d78814dSMark Rutland #elif defined(arch_atomic_try_cmpxchg_relaxed)
2200e5ab9effSThomas Gleixner 	__atomic_release_fence();
22019257959aSMark Rutland 	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
22029257959aSMark Rutland #elif defined(arch_atomic_try_cmpxchg)
22031d78814dSMark Rutland 	return arch_atomic_try_cmpxchg(v, old, new);
22049257959aSMark Rutland #else
22059257959aSMark Rutland 	int r, o = *old;
22069257959aSMark Rutland 	r = raw_atomic_cmpxchg_release(v, o, new);
22079257959aSMark Rutland 	if (unlikely(r != o))
22089257959aSMark Rutland 		*old = r;
22099257959aSMark Rutland 	return likely(r == o);
2210e5ab9effSThomas Gleixner #endif
22111d78814dSMark Rutland }
2212e5ab9effSThomas Gleixner 
2213ad811070SMark Rutland /**
2214ad811070SMark Rutland  * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2215ad811070SMark Rutland  * @v: pointer to atomic_t
2216ad811070SMark Rutland  * @old: pointer to int value to compare with
2217ad811070SMark Rutland  * @new: int value to assign
2218ad811070SMark Rutland  *
2219ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with relaxed ordering.
22206dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
22216dfee110SMark Rutland  * and relaxed ordering is provided.
2222ad811070SMark Rutland  *
2223ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
2224ad811070SMark Rutland  *
2225ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
2226ad811070SMark Rutland  */
2227e5ab9effSThomas Gleixner static __always_inline bool
22289257959aSMark Rutland raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
22299257959aSMark Rutland {
22301d78814dSMark Rutland #if defined(arch_atomic_try_cmpxchg_relaxed)
22311d78814dSMark Rutland 	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
22321d78814dSMark Rutland #elif defined(arch_atomic_try_cmpxchg)
22331d78814dSMark Rutland 	return arch_atomic_try_cmpxchg(v, old, new);
22341d78814dSMark Rutland #else
22359257959aSMark Rutland 	int r, o = *old;
22369257959aSMark Rutland 	r = raw_atomic_cmpxchg_relaxed(v, o, new);
22379257959aSMark Rutland 	if (unlikely(r != o))
22389257959aSMark Rutland 		*old = r;
22399257959aSMark Rutland 	return likely(r == o);
22409257959aSMark Rutland #endif
22411d78814dSMark Rutland }
22429257959aSMark Rutland 
2243ad811070SMark Rutland /**
2244ad811070SMark Rutland  * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
2245*f92a59f6SCarlos Llamas  * @i: int value to subtract
2246ad811070SMark Rutland  * @v: pointer to atomic_t
2247ad811070SMark Rutland  *
2248ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with full ordering.
2249ad811070SMark Rutland  *
2250ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
2251ad811070SMark Rutland  *
2252ad811070SMark Rutland  * Return: @true if the resulting value of @v is zero, @false otherwise.
2253ad811070SMark Rutland  */
22549257959aSMark Rutland static __always_inline bool
22559257959aSMark Rutland raw_atomic_sub_and_test(int i, atomic_t *v)
22569257959aSMark Rutland {
22571d78814dSMark Rutland #if defined(arch_atomic_sub_and_test)
22581d78814dSMark Rutland 	return arch_atomic_sub_and_test(i, v);
22599257959aSMark Rutland #else
22601d78814dSMark Rutland 	return raw_atomic_sub_return(i, v) == 0;
22611d78814dSMark Rutland #endif
22621d78814dSMark Rutland }
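
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * a typical caller drops @i references at once and frees the object when
 * the count hits zero; the full ordering keeps the free ordered against
 * earlier accesses. struct example_obj, its ->refs field and
 * example_free() are hypothetical names.
 *
 *	static inline void example_put_many(struct example_obj *e, int i)
 *	{
 *		if (raw_atomic_sub_and_test(i, &e->refs))
 *			example_free(e);
 *	}
 */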
22631d78814dSMark Rutland 
2264ad811070SMark Rutland /**
2265ad811070SMark Rutland  * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
2266ad811070SMark Rutland  * @v: pointer to atomic_t
2267ad811070SMark Rutland  *
2268ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with full ordering.
2269ad811070SMark Rutland  *
2270ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
2271ad811070SMark Rutland  *
2272ad811070SMark Rutland  * Return: @true if the resulting value of @v is zero, @false otherwise.
2273ad811070SMark Rutland  */
22749257959aSMark Rutland static __always_inline bool
22759257959aSMark Rutland raw_atomic_dec_and_test(atomic_t *v)
22769257959aSMark Rutland {
22771d78814dSMark Rutland #if defined(arch_atomic_dec_and_test)
22781d78814dSMark Rutland 	return arch_atomic_dec_and_test(v);
22799257959aSMark Rutland #else
22801d78814dSMark Rutland 	return raw_atomic_dec_return(v) == 0;
22811d78814dSMark Rutland #endif
22821d78814dSMark Rutland }
22831d78814dSMark Rutland 
2284ad811070SMark Rutland /**
2285ad811070SMark Rutland  * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
2286ad811070SMark Rutland  * @v: pointer to atomic_t
2287ad811070SMark Rutland  *
2288ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with full ordering.
2289ad811070SMark Rutland  *
2290ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
2291ad811070SMark Rutland  *
2292ad811070SMark Rutland  * Return: @true if the resulting value of @v is zero, @false otherwise.
2293ad811070SMark Rutland  */
22949257959aSMark Rutland static __always_inline bool
22959257959aSMark Rutland raw_atomic_inc_and_test(atomic_t *v)
22969257959aSMark Rutland {
22971d78814dSMark Rutland #if defined(arch_atomic_inc_and_test)
22981d78814dSMark Rutland 	return arch_atomic_inc_and_test(v);
22991d78814dSMark Rutland #else
23009257959aSMark Rutland 	return raw_atomic_inc_return(v) == 0;
23019257959aSMark Rutland #endif
23021d78814dSMark Rutland }
23039257959aSMark Rutland 
2304ad811070SMark Rutland /**
2305ad811070SMark Rutland  * raw_atomic_add_negative() - atomic add and test if negative with full ordering
2306ad811070SMark Rutland  * @i: int value to add
2307ad811070SMark Rutland  * @v: pointer to atomic_t
2308ad811070SMark Rutland  *
2309ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with full ordering.
2310ad811070SMark Rutland  *
2311ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
2312ad811070SMark Rutland  *
2313ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
2314ad811070SMark Rutland  */
23159257959aSMark Rutland static __always_inline bool
23169257959aSMark Rutland raw_atomic_add_negative(int i, atomic_t *v)
2317e5ab9effSThomas Gleixner {
23181d78814dSMark Rutland #if defined(arch_atomic_add_negative)
23191d78814dSMark Rutland 	return arch_atomic_add_negative(i, v);
23201d78814dSMark Rutland #elif defined(arch_atomic_add_negative_relaxed)
2321e5ab9effSThomas Gleixner 	bool ret;
2322e5ab9effSThomas Gleixner 	__atomic_pre_full_fence();
2323e5ab9effSThomas Gleixner 	ret = arch_atomic_add_negative_relaxed(i, v);
2324e5ab9effSThomas Gleixner 	__atomic_post_full_fence();
2325e5ab9effSThomas Gleixner 	return ret;
23269257959aSMark Rutland #else
23279257959aSMark Rutland 	return raw_atomic_add_return(i, v) < 0;
2328e5ab9effSThomas Gleixner #endif
23291d78814dSMark Rutland }
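
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * the boolean result reports the sign of the updated value, which lets a
 * counter's sign bit double as a "disabled" flag. example_ctr is a
 * hypothetical atomic_t biased to INT_MIN while the facility is disabled.
 *
 *	if (raw_atomic_add_negative(1, &example_ctr))
 *		return false;	// disabled; note the add itself still happened
 */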
2330e5ab9effSThomas Gleixner 
2331ad811070SMark Rutland /**
2332ad811070SMark Rutland  * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
2333ad811070SMark Rutland  * @i: int value to add
2334ad811070SMark Rutland  * @v: pointer to atomic_t
2335ad811070SMark Rutland  *
2336ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with acquire ordering.
2337ad811070SMark Rutland  *
2338ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
2339ad811070SMark Rutland  *
2340ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
2341ad811070SMark Rutland  */
23429257959aSMark Rutland static __always_inline bool
23439257959aSMark Rutland raw_atomic_add_negative_acquire(int i, atomic_t *v)
2344e3d18ceeSMark Rutland {
23451d78814dSMark Rutland #if defined(arch_atomic_add_negative_acquire)
23461d78814dSMark Rutland 	return arch_atomic_add_negative_acquire(i, v);
23471d78814dSMark Rutland #elif defined(arch_atomic_add_negative_relaxed)
23489257959aSMark Rutland 	bool ret = arch_atomic_add_negative_relaxed(i, v);
23499257959aSMark Rutland 	__atomic_acquire_fence();
23509257959aSMark Rutland 	return ret;
23519257959aSMark Rutland #elif defined(arch_atomic_add_negative)
23521d78814dSMark Rutland 	return arch_atomic_add_negative(i, v);
23539257959aSMark Rutland #else
23549257959aSMark Rutland 	return raw_atomic_add_return_acquire(i, v) < 0;
23559257959aSMark Rutland #endif
23561d78814dSMark Rutland }
23579257959aSMark Rutland 
2358ad811070SMark Rutland /**
2359ad811070SMark Rutland  * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
2360ad811070SMark Rutland  * @i: int value to add
2361ad811070SMark Rutland  * @v: pointer to atomic_t
2362ad811070SMark Rutland  *
2363ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with release ordering.
2364ad811070SMark Rutland  *
2365ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
2366ad811070SMark Rutland  *
2367ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
2368ad811070SMark Rutland  */
23699257959aSMark Rutland static __always_inline bool
23709257959aSMark Rutland raw_atomic_add_negative_release(int i, atomic_t *v)
23719257959aSMark Rutland {
23721d78814dSMark Rutland #if defined(arch_atomic_add_negative_release)
23731d78814dSMark Rutland 	return arch_atomic_add_negative_release(i, v);
23741d78814dSMark Rutland #elif defined(arch_atomic_add_negative_relaxed)
23759257959aSMark Rutland 	__atomic_release_fence();
23769257959aSMark Rutland 	return arch_atomic_add_negative_relaxed(i, v);
23779257959aSMark Rutland #elif defined(arch_atomic_add_negative)
23781d78814dSMark Rutland 	return arch_atomic_add_negative(i, v);
23799257959aSMark Rutland #else
23809257959aSMark Rutland 	return raw_atomic_add_return_release(i, v) < 0;
23819257959aSMark Rutland #endif
23821d78814dSMark Rutland }
23839257959aSMark Rutland 
2384ad811070SMark Rutland /**
2385ad811070SMark Rutland  * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
2386ad811070SMark Rutland  * @i: int value to add
2387ad811070SMark Rutland  * @v: pointer to atomic_t
2388ad811070SMark Rutland  *
2389ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with relaxed ordering.
2390ad811070SMark Rutland  *
2391ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
2392ad811070SMark Rutland  *
2393ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
2394ad811070SMark Rutland  */
23959257959aSMark Rutland static __always_inline bool
23969257959aSMark Rutland raw_atomic_add_negative_relaxed(int i, atomic_t *v)
23979257959aSMark Rutland {
23981d78814dSMark Rutland #if defined(arch_atomic_add_negative_relaxed)
23991d78814dSMark Rutland 	return arch_atomic_add_negative_relaxed(i, v);
24001d78814dSMark Rutland #elif defined(arch_atomic_add_negative)
24011d78814dSMark Rutland 	return arch_atomic_add_negative(i, v);
24029257959aSMark Rutland #else
24031d78814dSMark Rutland 	return raw_atomic_add_return_relaxed(i, v) < 0;
24041d78814dSMark Rutland #endif
24051d78814dSMark Rutland }
24061d78814dSMark Rutland 
2407ad811070SMark Rutland /**
2408ad811070SMark Rutland  * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
2409ad811070SMark Rutland  * @v: pointer to atomic_t
2410ad811070SMark Rutland  * @a: int value to add
2411ad811070SMark Rutland  * @u: int value to compare with
2412ad811070SMark Rutland  *
2413ad811070SMark Rutland  * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
24146dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2415ad811070SMark Rutland  *
2416ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
2417ad811070SMark Rutland  *
2418ad811070SMark Rutland  * Return: The original value of @v.
2419ad811070SMark Rutland  */
24209257959aSMark Rutland static __always_inline int
24219257959aSMark Rutland raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
24229257959aSMark Rutland {
24231d78814dSMark Rutland #if defined(arch_atomic_fetch_add_unless)
24241d78814dSMark Rutland 	return arch_atomic_fetch_add_unless(v, a, u);
24251d78814dSMark Rutland #else
24269257959aSMark Rutland 	int c = raw_atomic_read(v);
2427e3d18ceeSMark Rutland 
2428e3d18ceeSMark Rutland 	do {
2429e3d18ceeSMark Rutland 		if (unlikely(c == u))
2430e3d18ceeSMark Rutland 			break;
24319257959aSMark Rutland 	} while (!raw_atomic_try_cmpxchg(v, &c, c + a));
2432e3d18ceeSMark Rutland 
2433e3d18ceeSMark Rutland 	return c;
2434e3d18ceeSMark Rutland #endif
24351d78814dSMark Rutland }
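
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * the original value is returned, so comparing it against @u tells the
 * caller whether anything was actually added. example_users and
 * EXAMPLE_MAX are hypothetical.
 *
 *	// take a slot unless the counter has already hit the limit
 *	if (raw_atomic_fetch_add_unless(&example_users, 1, EXAMPLE_MAX) == EXAMPLE_MAX)
 *		return -EBUSY;
 */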
2436e3d18ceeSMark Rutland 
2437ad811070SMark Rutland /**
2438ad811070SMark Rutland  * raw_atomic_add_unless() - atomic add unless value with full ordering
2439ad811070SMark Rutland  * @v: pointer to atomic_t
2440ad811070SMark Rutland  * @a: int value to add
2441ad811070SMark Rutland  * @u: int value to compare with
2442ad811070SMark Rutland  *
2443ad811070SMark Rutland  * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
24446dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2445ad811070SMark Rutland  *
2446ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
2447ad811070SMark Rutland  *
2448ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
2449ad811070SMark Rutland  */
2450e3d18ceeSMark Rutland static __always_inline bool
24519257959aSMark Rutland raw_atomic_add_unless(atomic_t *v, int a, int u)
2452e3d18ceeSMark Rutland {
24531d78814dSMark Rutland #if defined(arch_atomic_add_unless)
24541d78814dSMark Rutland 	return arch_atomic_add_unless(v, a, u);
24559257959aSMark Rutland #else
24561d78814dSMark Rutland 	return raw_atomic_fetch_add_unless(v, a, u) != u;
24571d78814dSMark Rutland #endif
24581d78814dSMark Rutland }
24591d78814dSMark Rutland 
2460ad811070SMark Rutland /**
2461ad811070SMark Rutland  * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
2462ad811070SMark Rutland  * @v: pointer to atomic_t
2463ad811070SMark Rutland  *
2464ad811070SMark Rutland  * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
24656dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2466ad811070SMark Rutland  *
2467ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
2468ad811070SMark Rutland  *
2469ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
2470ad811070SMark Rutland  */
2471e3d18ceeSMark Rutland static __always_inline bool
24729257959aSMark Rutland raw_atomic_inc_not_zero(atomic_t *v)
2473e3d18ceeSMark Rutland {
24741d78814dSMark Rutland #if defined(arch_atomic_inc_not_zero)
24751d78814dSMark Rutland 	return arch_atomic_inc_not_zero(v);
24769257959aSMark Rutland #else
24771d78814dSMark Rutland 	return raw_atomic_add_unless(v, 1, 0);
24781d78814dSMark Rutland #endif
24791d78814dSMark Rutland }
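
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * the usual pattern is taking a new reference only while the object is
 * still live, e.g. after an RCU-protected lookup. struct example_obj and
 * its ->refs field are hypothetical.
 *
 *	static inline struct example_obj *example_tryget(struct example_obj *e)
 *	{
 *		if (e && !raw_atomic_inc_not_zero(&e->refs))
 *			return NULL;	// already dying, don't resurrect it
 *		return e;
 *	}
 */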
24801d78814dSMark Rutland 
2481ad811070SMark Rutland /**
2482ad811070SMark Rutland  * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
2483ad811070SMark Rutland  * @v: pointer to atomic_t
2484ad811070SMark Rutland  *
2485ad811070SMark Rutland  * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
24866dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2487ad811070SMark Rutland  *
2488ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
2489ad811070SMark Rutland  *
2490ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
2491ad811070SMark Rutland  */
2492e3d18ceeSMark Rutland static __always_inline bool
24939257959aSMark Rutland raw_atomic_inc_unless_negative(atomic_t *v)
2494e3d18ceeSMark Rutland {
24951d78814dSMark Rutland #if defined(arch_atomic_inc_unless_negative)
24961d78814dSMark Rutland 	return arch_atomic_inc_unless_negative(v);
24971d78814dSMark Rutland #else
24989257959aSMark Rutland 	int c = raw_atomic_read(v);
2499e3d18ceeSMark Rutland 
2500e3d18ceeSMark Rutland 	do {
2501e3d18ceeSMark Rutland 		if (unlikely(c < 0))
2502e3d18ceeSMark Rutland 			return false;
25039257959aSMark Rutland 	} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
2504e3d18ceeSMark Rutland 
2505e3d18ceeSMark Rutland 	return true;
2506e3d18ceeSMark Rutland #endif
25071d78814dSMark Rutland }
2508e3d18ceeSMark Rutland 
2509ad811070SMark Rutland /**
2510ad811070SMark Rutland  * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
2511ad811070SMark Rutland  * @v: pointer to atomic_t
2512ad811070SMark Rutland  *
2513ad811070SMark Rutland  * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
25146dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2515ad811070SMark Rutland  *
2516ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
2517ad811070SMark Rutland  *
2518ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
2519ad811070SMark Rutland  */
2520e3d18ceeSMark Rutland static __always_inline bool
25219257959aSMark Rutland raw_atomic_dec_unless_positive(atomic_t *v)
2522e3d18ceeSMark Rutland {
25231d78814dSMark Rutland #if defined(arch_atomic_dec_unless_positive)
25241d78814dSMark Rutland 	return arch_atomic_dec_unless_positive(v);
25251d78814dSMark Rutland #else
25269257959aSMark Rutland 	int c = raw_atomic_read(v);
2527e3d18ceeSMark Rutland 
2528e3d18ceeSMark Rutland 	do {
2529e3d18ceeSMark Rutland 		if (unlikely(c > 0))
2530e3d18ceeSMark Rutland 			return false;
25319257959aSMark Rutland 	} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
2532e3d18ceeSMark Rutland 
2533e3d18ceeSMark Rutland 	return true;
2534e3d18ceeSMark Rutland #endif
25351d78814dSMark Rutland }
2536e3d18ceeSMark Rutland 
2537ad811070SMark Rutland /**
2538ad811070SMark Rutland  * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
2539ad811070SMark Rutland  * @v: pointer to atomic_t
2540ad811070SMark Rutland  *
2541ad811070SMark Rutland  * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
25426dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
2543ad811070SMark Rutland  *
2544ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
2545ad811070SMark Rutland  *
2546b33eb50aSMark Rutland  * Return: The old value of @v minus one, regardless of whether @v was updated.
2547ad811070SMark Rutland  */
2548e3d18ceeSMark Rutland static __always_inline int
25499257959aSMark Rutland raw_atomic_dec_if_positive(atomic_t *v)
2550e3d18ceeSMark Rutland {
25511d78814dSMark Rutland #if defined(arch_atomic_dec_if_positive)
25521d78814dSMark Rutland 	return arch_atomic_dec_if_positive(v);
25531d78814dSMark Rutland #else
25549257959aSMark Rutland 	int dec, c = raw_atomic_read(v);
2555e3d18ceeSMark Rutland 
2556e3d18ceeSMark Rutland 	do {
2557e3d18ceeSMark Rutland 		dec = c - 1;
2558e3d18ceeSMark Rutland 		if (unlikely(dec < 0))
2559e3d18ceeSMark Rutland 			break;
25609257959aSMark Rutland 	} while (!raw_atomic_try_cmpxchg(v, &c, dec));
2561e3d18ceeSMark Rutland 
2562e3d18ceeSMark Rutland 	return dec;
2563e3d18ceeSMark Rutland #endif
25641d78814dSMark Rutland }
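
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * the return value is the old value minus one even when no decrement was
 * performed, so a negative result means @v was already <= 0 and was left
 * unchanged. example_slots is a hypothetical atomic_t counting free slots.
 *
 *	if (raw_atomic_dec_if_positive(&example_slots) < 0)
 *		return -EAGAIN;	// no slot was available, @v unchanged
 */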
2565e3d18ceeSMark Rutland 
2566e3d18ceeSMark Rutland #ifdef CONFIG_GENERIC_ATOMIC64
2567e3d18ceeSMark Rutland #include <asm-generic/atomic64.h>
2568e3d18ceeSMark Rutland #endif
2569e3d18ceeSMark Rutland 
2570ad811070SMark Rutland /**
2571ad811070SMark Rutland  * raw_atomic64_read() - atomic load with relaxed ordering
2572ad811070SMark Rutland  * @v: pointer to atomic64_t
2573ad811070SMark Rutland  *
2574ad811070SMark Rutland  * Atomically loads the value of @v with relaxed ordering.
2575ad811070SMark Rutland  *
2576ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
2577ad811070SMark Rutland  *
2578ad811070SMark Rutland  * Return: The value loaded from @v.
2579ad811070SMark Rutland  */
25801d78814dSMark Rutland static __always_inline s64
25811d78814dSMark Rutland raw_atomic64_read(const atomic64_t *v)
25821d78814dSMark Rutland {
25831d78814dSMark Rutland 	return arch_atomic64_read(v);
25841d78814dSMark Rutland }
25859257959aSMark Rutland 
2586ad811070SMark Rutland /**
2587ad811070SMark Rutland  * raw_atomic64_read_acquire() - atomic load with acquire ordering
2588ad811070SMark Rutland  * @v: pointer to atomic64_t
2589ad811070SMark Rutland  *
2590ad811070SMark Rutland  * Atomically loads the value of @v with acquire ordering.
2591ad811070SMark Rutland  *
2592ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
2593ad811070SMark Rutland  *
2594ad811070SMark Rutland  * Return: The value loaded from @v.
2595ad811070SMark Rutland  */
2596e3d18ceeSMark Rutland static __always_inline s64
25979257959aSMark Rutland raw_atomic64_read_acquire(const atomic64_t *v)
2598e3d18ceeSMark Rutland {
25991d78814dSMark Rutland #if defined(arch_atomic64_read_acquire)
26001d78814dSMark Rutland 	return arch_atomic64_read_acquire(v);
26011d78814dSMark Rutland #else
2602dc1b4df0SMark Rutland 	s64 ret;
2603dc1b4df0SMark Rutland 
2604dc1b4df0SMark Rutland 	if (__native_word(atomic64_t)) {
2605dc1b4df0SMark Rutland 		ret = smp_load_acquire(&(v)->counter);
2606dc1b4df0SMark Rutland 	} else {
26079257959aSMark Rutland 		ret = raw_atomic64_read(v);
2608dc1b4df0SMark Rutland 		__atomic_acquire_fence();
2609dc1b4df0SMark Rutland 	}
2610dc1b4df0SMark Rutland 
2611dc1b4df0SMark Rutland 	return ret;
2612e3d18ceeSMark Rutland #endif
26131d78814dSMark Rutland }
2614e3d18ceeSMark Rutland 
2615ad811070SMark Rutland /**
2616ad811070SMark Rutland  * raw_atomic64_set() - atomic set with relaxed ordering
2617ad811070SMark Rutland  * @v: pointer to atomic64_t
2618ad811070SMark Rutland  * @i: s64 value to assign
2619ad811070SMark Rutland  *
2620ad811070SMark Rutland  * Atomically sets @v to @i with relaxed ordering.
2621ad811070SMark Rutland  *
2622ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
2623ad811070SMark Rutland  *
2624ad811070SMark Rutland  * Return: Nothing.
2625ad811070SMark Rutland  */
26261d78814dSMark Rutland static __always_inline void
26271d78814dSMark Rutland raw_atomic64_set(atomic64_t *v, s64 i)
26281d78814dSMark Rutland {
26291d78814dSMark Rutland 	arch_atomic64_set(v, i);
26301d78814dSMark Rutland }
26319257959aSMark Rutland 
2632ad811070SMark Rutland /**
2633ad811070SMark Rutland  * raw_atomic64_set_release() - atomic set with release ordering
2634ad811070SMark Rutland  * @v: pointer to atomic64_t
2635ad811070SMark Rutland  * @i: s64 value to assign
2636ad811070SMark Rutland  *
2637ad811070SMark Rutland  * Atomically sets @v to @i with release ordering.
2638ad811070SMark Rutland  *
2639ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
2640ad811070SMark Rutland  *
2641ad811070SMark Rutland  * Return: Nothing.
2642ad811070SMark Rutland  */
2643e3d18ceeSMark Rutland static __always_inline void
26449257959aSMark Rutland raw_atomic64_set_release(atomic64_t *v, s64 i)
2645e3d18ceeSMark Rutland {
26461d78814dSMark Rutland #if defined(arch_atomic64_set_release)
26471d78814dSMark Rutland 	arch_atomic64_set_release(v, i);
26481d78814dSMark Rutland #else
2649dc1b4df0SMark Rutland 	if (__native_word(atomic64_t)) {
2650e3d18ceeSMark Rutland 		smp_store_release(&(v)->counter, i);
2651dc1b4df0SMark Rutland 	} else {
2652dc1b4df0SMark Rutland 		__atomic_release_fence();
26539257959aSMark Rutland 		raw_atomic64_set(v, i);
2654dc1b4df0SMark Rutland 	}
2655e3d18ceeSMark Rutland #endif
26561d78814dSMark Rutland }
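
/*
 * Editor's example (illustrative sketch, not part of the generated API):
 * raw_atomic64_set_release() pairs with raw_atomic64_read_acquire() so
 * that data written before the release store is visible to a reader that
 * observes the new value. example_data and example_ready are hypothetical.
 *
 *	// writer
 *	example_data = compute();
 *	raw_atomic64_set_release(&example_ready, 1);
 *
 *	// reader
 *	while (!raw_atomic64_read_acquire(&example_ready))
 *		cpu_relax();
 *	consume(example_data);
 */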
2657e3d18ceeSMark Rutland 
2658ad811070SMark Rutland /**
2659ad811070SMark Rutland  * raw_atomic64_add() - atomic add with relaxed ordering
2660ad811070SMark Rutland  * @i: s64 value to add
2661ad811070SMark Rutland  * @v: pointer to atomic64_t
2662ad811070SMark Rutland  *
2663ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with relaxed ordering.
2664ad811070SMark Rutland  *
2665ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
2666ad811070SMark Rutland  *
2667ad811070SMark Rutland  * Return: Nothing.
2668ad811070SMark Rutland  */
26691d78814dSMark Rutland static __always_inline void
26701d78814dSMark Rutland raw_atomic64_add(s64 i, atomic64_t *v)
26711d78814dSMark Rutland {
26721d78814dSMark Rutland 	arch_atomic64_add(i, v);
26731d78814dSMark Rutland }
2674e3d18ceeSMark Rutland 
2675ad811070SMark Rutland /**
2676ad811070SMark Rutland  * raw_atomic64_add_return() - atomic add with full ordering
2677ad811070SMark Rutland  * @i: s64 value to add
2678ad811070SMark Rutland  * @v: pointer to atomic64_t
2679ad811070SMark Rutland  *
2680ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with full ordering.
2681ad811070SMark Rutland  *
2682ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
2683ad811070SMark Rutland  *
2684ad811070SMark Rutland  * Return: The updated value of @v.
2685ad811070SMark Rutland  */
2686e3d18ceeSMark Rutland static __always_inline s64
26879257959aSMark Rutland raw_atomic64_add_return(s64 i, atomic64_t *v)
2688e3d18ceeSMark Rutland {
26891d78814dSMark Rutland #if defined(arch_atomic64_add_return)
26901d78814dSMark Rutland 	return arch_atomic64_add_return(i, v);
26911d78814dSMark Rutland #elif defined(arch_atomic64_add_return_relaxed)
2692e3d18ceeSMark Rutland 	s64 ret;
2693e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
2694e3d18ceeSMark Rutland 	ret = arch_atomic64_add_return_relaxed(i, v);
2695e3d18ceeSMark Rutland 	__atomic_post_full_fence();
2696e3d18ceeSMark Rutland 	return ret;
26979257959aSMark Rutland #else
26989257959aSMark Rutland #error "Unable to define raw_atomic64_add_return"
2699e3d18ceeSMark Rutland #endif
27001d78814dSMark Rutland }
2701e3d18ceeSMark Rutland 
2702ad811070SMark Rutland /**
2703ad811070SMark Rutland  * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
2704ad811070SMark Rutland  * @i: s64 value to add
2705ad811070SMark Rutland  * @v: pointer to atomic64_t
2706ad811070SMark Rutland  *
2707ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with acquire ordering.
2708ad811070SMark Rutland  *
2709ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
2710ad811070SMark Rutland  *
2711ad811070SMark Rutland  * Return: The updated value of @v.
2712ad811070SMark Rutland  */
2713e3d18ceeSMark Rutland static __always_inline s64
27149257959aSMark Rutland raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
2715e3d18ceeSMark Rutland {
27161d78814dSMark Rutland #if defined(arch_atomic64_add_return_acquire)
27171d78814dSMark Rutland 	return arch_atomic64_add_return_acquire(i, v);
27181d78814dSMark Rutland #elif defined(arch_atomic64_add_return_relaxed)
27199257959aSMark Rutland 	s64 ret = arch_atomic64_add_return_relaxed(i, v);
2720e3d18ceeSMark Rutland 	__atomic_acquire_fence();
2721e3d18ceeSMark Rutland 	return ret;
27229257959aSMark Rutland #elif defined(arch_atomic64_add_return)
27231d78814dSMark Rutland 	return arch_atomic64_add_return(i, v);
27249257959aSMark Rutland #else
27259257959aSMark Rutland #error "Unable to define raw_atomic64_add_return_acquire"
2726e3d18ceeSMark Rutland #endif
27271d78814dSMark Rutland }
2728e3d18ceeSMark Rutland 
2729ad811070SMark Rutland /**
2730ad811070SMark Rutland  * raw_atomic64_add_return_release() - atomic add with release ordering
2731ad811070SMark Rutland  * @i: s64 value to add
2732ad811070SMark Rutland  * @v: pointer to atomic64_t
2733ad811070SMark Rutland  *
2734ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with release ordering.
2735ad811070SMark Rutland  *
2736ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
2737ad811070SMark Rutland  *
2738ad811070SMark Rutland  * Return: The updated value of @v.
2739ad811070SMark Rutland  */
2740e3d18ceeSMark Rutland static __always_inline s64
27419257959aSMark Rutland raw_atomic64_add_return_release(s64 i, atomic64_t *v)
2742e3d18ceeSMark Rutland {
27431d78814dSMark Rutland #if defined(arch_atomic64_add_return_release)
27441d78814dSMark Rutland 	return arch_atomic64_add_return_release(i, v);
27451d78814dSMark Rutland #elif defined(arch_atomic64_add_return_relaxed)
2746e3d18ceeSMark Rutland 	__atomic_release_fence();
27479257959aSMark Rutland 	return arch_atomic64_add_return_relaxed(i, v);
27489257959aSMark Rutland #elif defined(arch_atomic64_add_return)
27491d78814dSMark Rutland 	return arch_atomic64_add_return(i, v);
27509257959aSMark Rutland #else
27519257959aSMark Rutland #error "Unable to define raw_atomic64_add_return_release"
2752e3d18ceeSMark Rutland #endif
27531d78814dSMark Rutland }
2754e3d18ceeSMark Rutland 
2755ad811070SMark Rutland /**
2756ad811070SMark Rutland  * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
2757ad811070SMark Rutland  * @i: s64 value to add
2758ad811070SMark Rutland  * @v: pointer to atomic64_t
2759ad811070SMark Rutland  *
2760ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with relaxed ordering.
2761ad811070SMark Rutland  *
2762ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
2763ad811070SMark Rutland  *
2764ad811070SMark Rutland  * Return: The updated value of @v.
2765ad811070SMark Rutland  */
27661d78814dSMark Rutland static __always_inline s64
27671d78814dSMark Rutland raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
27681d78814dSMark Rutland {
27699257959aSMark Rutland #if defined(arch_atomic64_add_return_relaxed)
27701d78814dSMark Rutland 	return arch_atomic64_add_return_relaxed(i, v);
27719257959aSMark Rutland #elif defined(arch_atomic64_add_return)
27721d78814dSMark Rutland 	return arch_atomic64_add_return(i, v);
27739257959aSMark Rutland #else
27749257959aSMark Rutland #error "Unable to define raw_atomic64_add_return_relaxed"
27759257959aSMark Rutland #endif
27761d78814dSMark Rutland }
27779257959aSMark Rutland 
2778ad811070SMark Rutland /**
2779ad811070SMark Rutland  * raw_atomic64_fetch_add() - atomic add with full ordering
2780ad811070SMark Rutland  * @i: s64 value to add
2781ad811070SMark Rutland  * @v: pointer to atomic64_t
2782ad811070SMark Rutland  *
2783ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with full ordering.
2784ad811070SMark Rutland  *
2785ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
2786ad811070SMark Rutland  *
2787ad811070SMark Rutland  * Return: The original value of @v.
2788ad811070SMark Rutland  */
2789e3d18ceeSMark Rutland static __always_inline s64
27909257959aSMark Rutland raw_atomic64_fetch_add(s64 i, atomic64_t *v)
2791e3d18ceeSMark Rutland {
27921d78814dSMark Rutland #if defined(arch_atomic64_fetch_add)
27931d78814dSMark Rutland 	return arch_atomic64_fetch_add(i, v);
27941d78814dSMark Rutland #elif defined(arch_atomic64_fetch_add_relaxed)
2795e3d18ceeSMark Rutland 	s64 ret;
2796e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
2797e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_add_relaxed(i, v);
2798e3d18ceeSMark Rutland 	__atomic_post_full_fence();
2799e3d18ceeSMark Rutland 	return ret;
28009257959aSMark Rutland #else
28019257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_add"
2802e3d18ceeSMark Rutland #endif
28031d78814dSMark Rutland }
2804e3d18ceeSMark Rutland 
2805ad811070SMark Rutland /**
2806ad811070SMark Rutland  * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
2807ad811070SMark Rutland  * @i: s64 value to add
2808ad811070SMark Rutland  * @v: pointer to atomic64_t
2809ad811070SMark Rutland  *
2810ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with acquire ordering.
2811ad811070SMark Rutland  *
2812ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
2813ad811070SMark Rutland  *
2814ad811070SMark Rutland  * Return: The original value of @v.
2815ad811070SMark Rutland  */
2816e3d18ceeSMark Rutland static __always_inline s64
28179257959aSMark Rutland raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
2818e3d18ceeSMark Rutland {
28191d78814dSMark Rutland #if defined(arch_atomic64_fetch_add_acquire)
28201d78814dSMark Rutland 	return arch_atomic64_fetch_add_acquire(i, v);
28211d78814dSMark Rutland #elif defined(arch_atomic64_fetch_add_relaxed)
28229257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2823e3d18ceeSMark Rutland 	__atomic_acquire_fence();
2824e3d18ceeSMark Rutland 	return ret;
28259257959aSMark Rutland #elif defined(arch_atomic64_fetch_add)
28261d78814dSMark Rutland 	return arch_atomic64_fetch_add(i, v);
28279257959aSMark Rutland #else
28289257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_add_acquire"
2829e3d18ceeSMark Rutland #endif
28301d78814dSMark Rutland }
2831e3d18ceeSMark Rutland 
2832ad811070SMark Rutland /**
2833ad811070SMark Rutland  * raw_atomic64_fetch_add_release() - atomic add with release ordering
2834ad811070SMark Rutland  * @i: s64 value to add
2835ad811070SMark Rutland  * @v: pointer to atomic64_t
2836ad811070SMark Rutland  *
2837ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with release ordering.
2838ad811070SMark Rutland  *
2839ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
2840ad811070SMark Rutland  *
2841ad811070SMark Rutland  * Return: The original value of @v.
2842ad811070SMark Rutland  */
2843e3d18ceeSMark Rutland static __always_inline s64
28449257959aSMark Rutland raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
2845e3d18ceeSMark Rutland {
28461d78814dSMark Rutland #if defined(arch_atomic64_fetch_add_release)
28471d78814dSMark Rutland 	return arch_atomic64_fetch_add_release(i, v);
28481d78814dSMark Rutland #elif defined(arch_atomic64_fetch_add_relaxed)
2849e3d18ceeSMark Rutland 	__atomic_release_fence();
28509257959aSMark Rutland 	return arch_atomic64_fetch_add_relaxed(i, v);
28519257959aSMark Rutland #elif defined(arch_atomic64_fetch_add)
28521d78814dSMark Rutland 	return arch_atomic64_fetch_add(i, v);
28539257959aSMark Rutland #else
28549257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_add_release"
2855e3d18ceeSMark Rutland #endif
28561d78814dSMark Rutland }
2857e3d18ceeSMark Rutland 
2858ad811070SMark Rutland /**
2859ad811070SMark Rutland  * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
2860ad811070SMark Rutland  * @i: s64 value to add
2861ad811070SMark Rutland  * @v: pointer to atomic64_t
2862ad811070SMark Rutland  *
2863ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with relaxed ordering.
2864ad811070SMark Rutland  *
2865ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
2866ad811070SMark Rutland  *
2867ad811070SMark Rutland  * Return: The original value of @v.
2868ad811070SMark Rutland  */
28691d78814dSMark Rutland static __always_inline s64
28701d78814dSMark Rutland raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
28711d78814dSMark Rutland {
28729257959aSMark Rutland #if defined(arch_atomic64_fetch_add_relaxed)
28731d78814dSMark Rutland 	return arch_atomic64_fetch_add_relaxed(i, v);
28749257959aSMark Rutland #elif defined(arch_atomic64_fetch_add)
28751d78814dSMark Rutland 	return arch_atomic64_fetch_add(i, v);
28769257959aSMark Rutland #else
28779257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_add_relaxed"
28789257959aSMark Rutland #endif
28791d78814dSMark Rutland }
28809257959aSMark Rutland 
2881ad811070SMark Rutland /**
2882ad811070SMark Rutland  * raw_atomic64_sub() - atomic subtract with relaxed ordering
2883ad811070SMark Rutland  * @i: s64 value to subtract
2884ad811070SMark Rutland  * @v: pointer to atomic64_t
2885ad811070SMark Rutland  *
2886ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with relaxed ordering.
2887ad811070SMark Rutland  *
2888ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
2889ad811070SMark Rutland  *
2890ad811070SMark Rutland  * Return: Nothing.
2891ad811070SMark Rutland  */
28921d78814dSMark Rutland static __always_inline void
28931d78814dSMark Rutland raw_atomic64_sub(s64 i, atomic64_t *v)
28941d78814dSMark Rutland {
28951d78814dSMark Rutland 	arch_atomic64_sub(i, v);
28961d78814dSMark Rutland }
28979257959aSMark Rutland 
2898ad811070SMark Rutland /**
2899ad811070SMark Rutland  * raw_atomic64_sub_return() - atomic subtract with full ordering
2900ad811070SMark Rutland  * @i: s64 value to subtract
2901ad811070SMark Rutland  * @v: pointer to atomic64_t
2902ad811070SMark Rutland  *
2903ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with full ordering.
2904ad811070SMark Rutland  *
2905ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
2906ad811070SMark Rutland  *
2907ad811070SMark Rutland  * Return: The updated value of @v.
2908ad811070SMark Rutland  */
2909e3d18ceeSMark Rutland static __always_inline s64
29109257959aSMark Rutland raw_atomic64_sub_return(s64 i, atomic64_t *v)
2911e3d18ceeSMark Rutland {
29121d78814dSMark Rutland #if defined(arch_atomic64_sub_return)
29131d78814dSMark Rutland 	return arch_atomic64_sub_return(i, v);
29141d78814dSMark Rutland #elif defined(arch_atomic64_sub_return_relaxed)
2915e3d18ceeSMark Rutland 	s64 ret;
2916e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
2917e3d18ceeSMark Rutland 	ret = arch_atomic64_sub_return_relaxed(i, v);
2918e3d18ceeSMark Rutland 	__atomic_post_full_fence();
2919e3d18ceeSMark Rutland 	return ret;
29209257959aSMark Rutland #else
29219257959aSMark Rutland #error "Unable to define raw_atomic64_sub_return"
2922e3d18ceeSMark Rutland #endif
29231d78814dSMark Rutland }
2924e3d18ceeSMark Rutland 
2925ad811070SMark Rutland /**
2926ad811070SMark Rutland  * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
2927ad811070SMark Rutland  * @i: s64 value to subtract
2928ad811070SMark Rutland  * @v: pointer to atomic64_t
2929ad811070SMark Rutland  *
2930ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with acquire ordering.
2931ad811070SMark Rutland  *
2932ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
2933ad811070SMark Rutland  *
2934ad811070SMark Rutland  * Return: The updated value of @v.
2935ad811070SMark Rutland  */
2936e3d18ceeSMark Rutland static __always_inline s64
29379257959aSMark Rutland raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
2938e3d18ceeSMark Rutland {
29391d78814dSMark Rutland #if defined(arch_atomic64_sub_return_acquire)
29401d78814dSMark Rutland 	return arch_atomic64_sub_return_acquire(i, v);
29411d78814dSMark Rutland #elif defined(arch_atomic64_sub_return_relaxed)
29429257959aSMark Rutland 	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2943e3d18ceeSMark Rutland 	__atomic_acquire_fence();
2944e3d18ceeSMark Rutland 	return ret;
29459257959aSMark Rutland #elif defined(arch_atomic64_sub_return)
29461d78814dSMark Rutland 	return arch_atomic64_sub_return(i, v);
29479257959aSMark Rutland #else
29489257959aSMark Rutland #error "Unable to define raw_atomic64_sub_return_acquire"
2949e3d18ceeSMark Rutland #endif
29501d78814dSMark Rutland }
2951e3d18ceeSMark Rutland 
2952ad811070SMark Rutland /**
2953ad811070SMark Rutland  * raw_atomic64_sub_return_release() - atomic subtract with release ordering
2954ad811070SMark Rutland  * @i: s64 value to subtract
2955ad811070SMark Rutland  * @v: pointer to atomic64_t
2956ad811070SMark Rutland  *
2957ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with release ordering.
2958ad811070SMark Rutland  *
2959ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
2960ad811070SMark Rutland  *
2961ad811070SMark Rutland  * Return: The updated value of @v.
2962ad811070SMark Rutland  */
2963e3d18ceeSMark Rutland static __always_inline s64
29649257959aSMark Rutland raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
2965e3d18ceeSMark Rutland {
29661d78814dSMark Rutland #if defined(arch_atomic64_sub_return_release)
29671d78814dSMark Rutland 	return arch_atomic64_sub_return_release(i, v);
29681d78814dSMark Rutland #elif defined(arch_atomic64_sub_return_relaxed)
2969e3d18ceeSMark Rutland 	__atomic_release_fence();
29709257959aSMark Rutland 	return arch_atomic64_sub_return_relaxed(i, v);
29719257959aSMark Rutland #elif defined(arch_atomic64_sub_return)
29721d78814dSMark Rutland 	return arch_atomic64_sub_return(i, v);
29739257959aSMark Rutland #else
29749257959aSMark Rutland #error "Unable to define raw_atomic64_sub_return_release"
2975e3d18ceeSMark Rutland #endif
29761d78814dSMark Rutland }
2977e3d18ceeSMark Rutland 
2978ad811070SMark Rutland /**
2979ad811070SMark Rutland  * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
2980ad811070SMark Rutland  * @i: s64 value to subtract
2981ad811070SMark Rutland  * @v: pointer to atomic64_t
2982ad811070SMark Rutland  *
2983ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with relaxed ordering.
2984ad811070SMark Rutland  *
2985ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
2986ad811070SMark Rutland  *
2987ad811070SMark Rutland  * Return: The updated value of @v.
2988ad811070SMark Rutland  */
29891d78814dSMark Rutland static __always_inline s64
29901d78814dSMark Rutland raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
29911d78814dSMark Rutland {
29929257959aSMark Rutland #if defined(arch_atomic64_sub_return_relaxed)
29931d78814dSMark Rutland 	return arch_atomic64_sub_return_relaxed(i, v);
29949257959aSMark Rutland #elif defined(arch_atomic64_sub_return)
29951d78814dSMark Rutland 	return arch_atomic64_sub_return(i, v);
29969257959aSMark Rutland #else
29979257959aSMark Rutland #error "Unable to define raw_atomic64_sub_return_relaxed"
29989257959aSMark Rutland #endif
29991d78814dSMark Rutland }
30009257959aSMark Rutland 
3001ad811070SMark Rutland /**
3002ad811070SMark Rutland  * raw_atomic64_fetch_sub() - atomic subtract with full ordering
3003ad811070SMark Rutland  * @i: s64 value to subtract
3004ad811070SMark Rutland  * @v: pointer to atomic64_t
3005ad811070SMark Rutland  *
3006ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with full ordering.
3007ad811070SMark Rutland  *
3008ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
3009ad811070SMark Rutland  *
3010ad811070SMark Rutland  * Return: The original value of @v.
3011ad811070SMark Rutland  */
3012e3d18ceeSMark Rutland static __always_inline s64
30139257959aSMark Rutland raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
3014e3d18ceeSMark Rutland {
30151d78814dSMark Rutland #if defined(arch_atomic64_fetch_sub)
30161d78814dSMark Rutland 	return arch_atomic64_fetch_sub(i, v);
30171d78814dSMark Rutland #elif defined(arch_atomic64_fetch_sub_relaxed)
3018e3d18ceeSMark Rutland 	s64 ret;
3019e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3020e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_sub_relaxed(i, v);
3021e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3022e3d18ceeSMark Rutland 	return ret;
30239257959aSMark Rutland #else
30249257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_sub"
3025e3d18ceeSMark Rutland #endif
30261d78814dSMark Rutland }
3027e3d18ceeSMark Rutland 
3028ad811070SMark Rutland /**
3029ad811070SMark Rutland  * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
3030ad811070SMark Rutland  * @i: s64 value to subtract
3031ad811070SMark Rutland  * @v: pointer to atomic64_t
3032ad811070SMark Rutland  *
3033ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with acquire ordering.
3034ad811070SMark Rutland  *
3035ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
3036ad811070SMark Rutland  *
3037ad811070SMark Rutland  * Return: The original value of @v.
3038ad811070SMark Rutland  */
3039e3d18ceeSMark Rutland static __always_inline s64
30409257959aSMark Rutland raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
3041e3d18ceeSMark Rutland {
30421d78814dSMark Rutland #if defined(arch_atomic64_fetch_sub_acquire)
30431d78814dSMark Rutland 	return arch_atomic64_fetch_sub_acquire(i, v);
30441d78814dSMark Rutland #elif defined(arch_atomic64_fetch_sub_relaxed)
30459257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
3046e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3047e3d18ceeSMark Rutland 	return ret;
30489257959aSMark Rutland #elif defined(arch_atomic64_fetch_sub)
30491d78814dSMark Rutland 	return arch_atomic64_fetch_sub(i, v);
30509257959aSMark Rutland #else
30519257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_sub_acquire"
3052e3d18ceeSMark Rutland #endif
30531d78814dSMark Rutland }
3054e3d18ceeSMark Rutland 
3055ad811070SMark Rutland /**
3056ad811070SMark Rutland  * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
3057ad811070SMark Rutland  * @i: s64 value to subtract
3058ad811070SMark Rutland  * @v: pointer to atomic64_t
3059ad811070SMark Rutland  *
3060ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with release ordering.
3061ad811070SMark Rutland  *
3062ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
3063ad811070SMark Rutland  *
3064ad811070SMark Rutland  * Return: The original value of @v.
3065ad811070SMark Rutland  */
3066e3d18ceeSMark Rutland static __always_inline s64
30679257959aSMark Rutland raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
3068e3d18ceeSMark Rutland {
30691d78814dSMark Rutland #if defined(arch_atomic64_fetch_sub_release)
30701d78814dSMark Rutland 	return arch_atomic64_fetch_sub_release(i, v);
30711d78814dSMark Rutland #elif defined(arch_atomic64_fetch_sub_relaxed)
3072e3d18ceeSMark Rutland 	__atomic_release_fence();
30739257959aSMark Rutland 	return arch_atomic64_fetch_sub_relaxed(i, v);
30749257959aSMark Rutland #elif defined(arch_atomic64_fetch_sub)
30751d78814dSMark Rutland 	return arch_atomic64_fetch_sub(i, v);
30769257959aSMark Rutland #else
30779257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_sub_release"
3078e3d18ceeSMark Rutland #endif
30791d78814dSMark Rutland }
3080e3d18ceeSMark Rutland 
3081ad811070SMark Rutland /**
3082ad811070SMark Rutland  * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
3083ad811070SMark Rutland  * @i: s64 value to subtract
3084ad811070SMark Rutland  * @v: pointer to atomic64_t
3085ad811070SMark Rutland  *
3086ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with relaxed ordering.
3087ad811070SMark Rutland  *
3088ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
3089ad811070SMark Rutland  *
3090ad811070SMark Rutland  * Return: The original value of @v.
3091ad811070SMark Rutland  */
30921d78814dSMark Rutland static __always_inline s64
30931d78814dSMark Rutland raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
30941d78814dSMark Rutland {
30959257959aSMark Rutland #if defined(arch_atomic64_fetch_sub_relaxed)
30961d78814dSMark Rutland 	return arch_atomic64_fetch_sub_relaxed(i, v);
30979257959aSMark Rutland #elif defined(arch_atomic64_fetch_sub)
30981d78814dSMark Rutland 	return arch_atomic64_fetch_sub(i, v);
30999257959aSMark Rutland #else
31009257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_sub_relaxed"
31019257959aSMark Rutland #endif
31021d78814dSMark Rutland }
31039257959aSMark Rutland 
3104ad811070SMark Rutland /**
3105ad811070SMark Rutland  * raw_atomic64_inc() - atomic increment with relaxed ordering
3106ad811070SMark Rutland  * @v: pointer to atomic64_t
3107ad811070SMark Rutland  *
3108ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with relaxed ordering.
3109ad811070SMark Rutland  *
3110ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
3111ad811070SMark Rutland  *
3112ad811070SMark Rutland  * Return: Nothing.
3113ad811070SMark Rutland  */
31149257959aSMark Rutland static __always_inline void
31159257959aSMark Rutland raw_atomic64_inc(atomic64_t *v)
31169257959aSMark Rutland {
31171d78814dSMark Rutland #if defined(arch_atomic64_inc)
31181d78814dSMark Rutland 	arch_atomic64_inc(v);
31191d78814dSMark Rutland #else
31209257959aSMark Rutland 	raw_atomic64_add(1, v);
31219257959aSMark Rutland #endif
31221d78814dSMark Rutland }
31239257959aSMark Rutland 
3124ad811070SMark Rutland /**
3125ad811070SMark Rutland  * raw_atomic64_inc_return() - atomic increment with full ordering
3126ad811070SMark Rutland  * @v: pointer to atomic64_t
3127ad811070SMark Rutland  *
3128ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with full ordering.
3129ad811070SMark Rutland  *
3130ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
3131ad811070SMark Rutland  *
3132ad811070SMark Rutland  * Return: The updated value of @v.
3133ad811070SMark Rutland  */
3134e3d18ceeSMark Rutland static __always_inline s64
31359257959aSMark Rutland raw_atomic64_inc_return(atomic64_t *v)
3136e3d18ceeSMark Rutland {
31371d78814dSMark Rutland #if defined(arch_atomic64_inc_return)
31381d78814dSMark Rutland 	return arch_atomic64_inc_return(v);
31391d78814dSMark Rutland #elif defined(arch_atomic64_inc_return_relaxed)
3140e3d18ceeSMark Rutland 	s64 ret;
3141e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3142e3d18ceeSMark Rutland 	ret = arch_atomic64_inc_return_relaxed(v);
3143e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3144e3d18ceeSMark Rutland 	return ret;
31459257959aSMark Rutland #else
31469257959aSMark Rutland 	return raw_atomic64_add_return(1, v);
3147e3d18ceeSMark Rutland #endif
31481d78814dSMark Rutland }
3149e3d18ceeSMark Rutland 
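/*
 * Illustrative sketch only: a hypothetical fully ordered 64-bit ID
 * allocator built on the raw increment above; the first caller observes 1.
 *
 *	static atomic64_t example_next_id = ATOMIC64_INIT(0);
 *
 *	static noinstr u64 example_alloc_id(void)
 *	{
 *		return (u64)raw_atomic64_inc_return(&example_next_id);
 *	}
 */
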
3150ad811070SMark Rutland /**
3151ad811070SMark Rutland  * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
3152ad811070SMark Rutland  * @v: pointer to atomic64_t
3153ad811070SMark Rutland  *
3154ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with acquire ordering.
3155ad811070SMark Rutland  *
3156ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
3157ad811070SMark Rutland  *
3158ad811070SMark Rutland  * Return: The updated value of @v.
3159ad811070SMark Rutland  */
3160e3d18ceeSMark Rutland static __always_inline s64
31619257959aSMark Rutland raw_atomic64_inc_return_acquire(atomic64_t *v)
3162e3d18ceeSMark Rutland {
31631d78814dSMark Rutland #if defined(arch_atomic64_inc_return_acquire)
31641d78814dSMark Rutland 	return arch_atomic64_inc_return_acquire(v);
31651d78814dSMark Rutland #elif defined(arch_atomic64_inc_return_relaxed)
31669257959aSMark Rutland 	s64 ret = arch_atomic64_inc_return_relaxed(v);
3167e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3168e3d18ceeSMark Rutland 	return ret;
31699257959aSMark Rutland #elif defined(arch_atomic64_inc_return)
31701d78814dSMark Rutland 	return arch_atomic64_inc_return(v);
31719257959aSMark Rutland #else
31729257959aSMark Rutland 	return raw_atomic64_add_return_acquire(1, v);
3173e3d18ceeSMark Rutland #endif
31741d78814dSMark Rutland }
3175e3d18ceeSMark Rutland 
3176ad811070SMark Rutland /**
3177ad811070SMark Rutland  * raw_atomic64_inc_return_release() - atomic increment with release ordering
3178ad811070SMark Rutland  * @v: pointer to atomic64_t
3179ad811070SMark Rutland  *
3180ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with release ordering.
3181ad811070SMark Rutland  *
3182ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
3183ad811070SMark Rutland  *
3184ad811070SMark Rutland  * Return: The updated value of @v.
3185ad811070SMark Rutland  */
3186e3d18ceeSMark Rutland static __always_inline s64
31879257959aSMark Rutland raw_atomic64_inc_return_release(atomic64_t *v)
3188e3d18ceeSMark Rutland {
31891d78814dSMark Rutland #if defined(arch_atomic64_inc_return_release)
31901d78814dSMark Rutland 	return arch_atomic64_inc_return_release(v);
31911d78814dSMark Rutland #elif defined(arch_atomic64_inc_return_relaxed)
3192e3d18ceeSMark Rutland 	__atomic_release_fence();
31939257959aSMark Rutland 	return arch_atomic64_inc_return_relaxed(v);
31949257959aSMark Rutland #elif defined(arch_atomic64_inc_return)
31951d78814dSMark Rutland 	return arch_atomic64_inc_return(v);
31969257959aSMark Rutland #else
31979257959aSMark Rutland 	return raw_atomic64_add_return_release(1, v);
3198e3d18ceeSMark Rutland #endif
31991d78814dSMark Rutland }
3200e3d18ceeSMark Rutland 
3201ad811070SMark Rutland /**
3202ad811070SMark Rutland  * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
3203ad811070SMark Rutland  * @v: pointer to atomic64_t
3204ad811070SMark Rutland  *
3205ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with relaxed ordering.
3206ad811070SMark Rutland  *
3207ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
3208ad811070SMark Rutland  *
3209ad811070SMark Rutland  * Return: The updated value of @v.
3210ad811070SMark Rutland  */
3211e3d18ceeSMark Rutland static __always_inline s64
32129257959aSMark Rutland raw_atomic64_inc_return_relaxed(atomic64_t *v)
32139257959aSMark Rutland {
32141d78814dSMark Rutland #if defined(arch_atomic64_inc_return_relaxed)
32151d78814dSMark Rutland 	return arch_atomic64_inc_return_relaxed(v);
32161d78814dSMark Rutland #elif defined(arch_atomic64_inc_return)
32171d78814dSMark Rutland 	return arch_atomic64_inc_return(v);
32181d78814dSMark Rutland #else
32199257959aSMark Rutland 	return raw_atomic64_add_return_relaxed(1, v);
32209257959aSMark Rutland #endif
32211d78814dSMark Rutland }
32229257959aSMark Rutland 
3223ad811070SMark Rutland /**
3224ad811070SMark Rutland  * raw_atomic64_fetch_inc() - atomic increment with full ordering
3225ad811070SMark Rutland  * @v: pointer to atomic64_t
3226ad811070SMark Rutland  *
3227ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with full ordering.
3228ad811070SMark Rutland  *
3229ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
3230ad811070SMark Rutland  *
3231ad811070SMark Rutland  * Return: The original value of @v.
3232ad811070SMark Rutland  */
32339257959aSMark Rutland static __always_inline s64
32349257959aSMark Rutland raw_atomic64_fetch_inc(atomic64_t *v)
3235e3d18ceeSMark Rutland {
32361d78814dSMark Rutland #if defined(arch_atomic64_fetch_inc)
32371d78814dSMark Rutland 	return arch_atomic64_fetch_inc(v);
32381d78814dSMark Rutland #elif defined(arch_atomic64_fetch_inc_relaxed)
3239e3d18ceeSMark Rutland 	s64 ret;
3240e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3241e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_inc_relaxed(v);
3242e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3243e3d18ceeSMark Rutland 	return ret;
32449257959aSMark Rutland #else
32459257959aSMark Rutland 	return raw_atomic64_fetch_add(1, v);
3246e3d18ceeSMark Rutland #endif
32471d78814dSMark Rutland }
3248e3d18ceeSMark Rutland 
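/*
 * Illustrative sketch only, contrasting fetch_inc with inc_return: the
 * fetch_* form returns the value before the increment, so a hypothetical
 * producer can use it to claim slot numbers starting at 0.
 *
 *	static noinstr u64 example_claim_slot(atomic64_t *head)
 *	{
 *		return (u64)raw_atomic64_fetch_inc(head);
 *	}
 */
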
3249ad811070SMark Rutland /**
3250ad811070SMark Rutland  * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
3251ad811070SMark Rutland  * @v: pointer to atomic64_t
3252ad811070SMark Rutland  *
3253ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with acquire ordering.
3254ad811070SMark Rutland  *
3255ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
3256ad811070SMark Rutland  *
3257ad811070SMark Rutland  * Return: The original value of @v.
3258ad811070SMark Rutland  */
3259e3d18ceeSMark Rutland static __always_inline s64
32609257959aSMark Rutland raw_atomic64_fetch_inc_acquire(atomic64_t *v)
3261e3d18ceeSMark Rutland {
32621d78814dSMark Rutland #if defined(arch_atomic64_fetch_inc_acquire)
32631d78814dSMark Rutland 	return arch_atomic64_fetch_inc_acquire(v);
32641d78814dSMark Rutland #elif defined(arch_atomic64_fetch_inc_relaxed)
32659257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3266e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3267e3d18ceeSMark Rutland 	return ret;
32689257959aSMark Rutland #elif defined(arch_atomic64_fetch_inc)
32691d78814dSMark Rutland 	return arch_atomic64_fetch_inc(v);
32709257959aSMark Rutland #else
32719257959aSMark Rutland 	return raw_atomic64_fetch_add_acquire(1, v);
3272e3d18ceeSMark Rutland #endif
32731d78814dSMark Rutland }
3274e3d18ceeSMark Rutland 
3275ad811070SMark Rutland /**
3276ad811070SMark Rutland  * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
3277ad811070SMark Rutland  * @v: pointer to atomic64_t
3278ad811070SMark Rutland  *
3279ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with release ordering.
3280ad811070SMark Rutland  *
3281ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
3282ad811070SMark Rutland  *
3283ad811070SMark Rutland  * Return: The original value of @v.
3284ad811070SMark Rutland  */
3285e3d18ceeSMark Rutland static __always_inline s64
32869257959aSMark Rutland raw_atomic64_fetch_inc_release(atomic64_t *v)
3287e3d18ceeSMark Rutland {
32881d78814dSMark Rutland #if defined(arch_atomic64_fetch_inc_release)
32891d78814dSMark Rutland 	return arch_atomic64_fetch_inc_release(v);
32901d78814dSMark Rutland #elif defined(arch_atomic64_fetch_inc_relaxed)
3291e3d18ceeSMark Rutland 	__atomic_release_fence();
32929257959aSMark Rutland 	return arch_atomic64_fetch_inc_relaxed(v);
32939257959aSMark Rutland #elif defined(arch_atomic64_fetch_inc)
32941d78814dSMark Rutland 	return arch_atomic64_fetch_inc(v);
32959257959aSMark Rutland #else
32969257959aSMark Rutland 	return raw_atomic64_fetch_add_release(1, v);
3297e3d18ceeSMark Rutland #endif
32981d78814dSMark Rutland }
3299e3d18ceeSMark Rutland 
3300ad811070SMark Rutland /**
3301ad811070SMark Rutland  * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
3302ad811070SMark Rutland  * @v: pointer to atomic64_t
3303ad811070SMark Rutland  *
3304ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with relaxed ordering.
3305ad811070SMark Rutland  *
3306ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
3307ad811070SMark Rutland  *
3308ad811070SMark Rutland  * Return: The original value of @v.
3309ad811070SMark Rutland  */
3310e3d18ceeSMark Rutland static __always_inline s64
33119257959aSMark Rutland raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
33129257959aSMark Rutland {
33131d78814dSMark Rutland #if defined(arch_atomic64_fetch_inc_relaxed)
33141d78814dSMark Rutland 	return arch_atomic64_fetch_inc_relaxed(v);
33151d78814dSMark Rutland #elif defined(arch_atomic64_fetch_inc)
33161d78814dSMark Rutland 	return arch_atomic64_fetch_inc(v);
33179257959aSMark Rutland #else
33181d78814dSMark Rutland 	return raw_atomic64_fetch_add_relaxed(1, v);
33191d78814dSMark Rutland #endif
33201d78814dSMark Rutland }
33211d78814dSMark Rutland 
3322ad811070SMark Rutland /**
3323ad811070SMark Rutland  * raw_atomic64_dec() - atomic decrement with relaxed ordering
3324ad811070SMark Rutland  * @v: pointer to atomic64_t
3325ad811070SMark Rutland  *
3326ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with relaxed ordering.
3327ad811070SMark Rutland  *
3328ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
3329ad811070SMark Rutland  *
3330ad811070SMark Rutland  * Return: Nothing.
3331ad811070SMark Rutland  */
33329257959aSMark Rutland static __always_inline void
33339257959aSMark Rutland raw_atomic64_dec(atomic64_t *v)
33349257959aSMark Rutland {
33351d78814dSMark Rutland #if defined(arch_atomic64_dec)
33361d78814dSMark Rutland 	arch_atomic64_dec(v);
33371d78814dSMark Rutland #else
33389257959aSMark Rutland 	raw_atomic64_sub(1, v);
33399257959aSMark Rutland #endif
33401d78814dSMark Rutland }
33419257959aSMark Rutland 
3342ad811070SMark Rutland /**
3343ad811070SMark Rutland  * raw_atomic64_dec_return() - atomic decrement with full ordering
3344ad811070SMark Rutland  * @v: pointer to atomic64_t
3345ad811070SMark Rutland  *
3346ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with full ordering.
3347ad811070SMark Rutland  *
3348ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
3349ad811070SMark Rutland  *
3350ad811070SMark Rutland  * Return: The updated value of @v.
3351ad811070SMark Rutland  */
33529257959aSMark Rutland static __always_inline s64
33539257959aSMark Rutland raw_atomic64_dec_return(atomic64_t *v)
3354e3d18ceeSMark Rutland {
33551d78814dSMark Rutland #if defined(arch_atomic64_dec_return)
33561d78814dSMark Rutland 	return arch_atomic64_dec_return(v);
33571d78814dSMark Rutland #elif defined(arch_atomic64_dec_return_relaxed)
3358e3d18ceeSMark Rutland 	s64 ret;
3359e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3360e3d18ceeSMark Rutland 	ret = arch_atomic64_dec_return_relaxed(v);
3361e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3362e3d18ceeSMark Rutland 	return ret;
33639257959aSMark Rutland #else
33649257959aSMark Rutland 	return raw_atomic64_sub_return(1, v);
3365e3d18ceeSMark Rutland #endif
33661d78814dSMark Rutland }
3367e3d18ceeSMark Rutland 
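/*
 * Illustrative sketch only: a hypothetical drop-and-test helper using the
 * fully ordered decrement above; it reports true when the count reaches
 * zero.
 *
 *	static noinstr bool example_put_and_test(atomic64_t *count)
 *	{
 *		return raw_atomic64_dec_return(count) == 0;
 *	}
 */
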
3368ad811070SMark Rutland /**
3369ad811070SMark Rutland  * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
3370ad811070SMark Rutland  * @v: pointer to atomic64_t
3371ad811070SMark Rutland  *
3372ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with acquire ordering.
3373ad811070SMark Rutland  *
3374ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
3375ad811070SMark Rutland  *
3376ad811070SMark Rutland  * Return: The updated value of @v.
3377ad811070SMark Rutland  */
3378e3d18ceeSMark Rutland static __always_inline s64
33799257959aSMark Rutland raw_atomic64_dec_return_acquire(atomic64_t *v)
3380e3d18ceeSMark Rutland {
33811d78814dSMark Rutland #if defined(arch_atomic64_dec_return_acquire)
33821d78814dSMark Rutland 	return arch_atomic64_dec_return_acquire(v);
33831d78814dSMark Rutland #elif defined(arch_atomic64_dec_return_relaxed)
33849257959aSMark Rutland 	s64 ret = arch_atomic64_dec_return_relaxed(v);
3385e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3386e3d18ceeSMark Rutland 	return ret;
33879257959aSMark Rutland #elif defined(arch_atomic64_dec_return)
33881d78814dSMark Rutland 	return arch_atomic64_dec_return(v);
33899257959aSMark Rutland #else
33909257959aSMark Rutland 	return raw_atomic64_sub_return_acquire(1, v);
3391e3d18ceeSMark Rutland #endif
33921d78814dSMark Rutland }
3393e3d18ceeSMark Rutland 
3394ad811070SMark Rutland /**
3395ad811070SMark Rutland  * raw_atomic64_dec_return_release() - atomic decrement with release ordering
3396ad811070SMark Rutland  * @v: pointer to atomic64_t
3397ad811070SMark Rutland  *
3398ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with release ordering.
3399ad811070SMark Rutland  *
3400ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
3401ad811070SMark Rutland  *
3402ad811070SMark Rutland  * Return: The updated value of @v.
3403ad811070SMark Rutland  */
3404e3d18ceeSMark Rutland static __always_inline s64
34059257959aSMark Rutland raw_atomic64_dec_return_release(atomic64_t *v)
3406e3d18ceeSMark Rutland {
34071d78814dSMark Rutland #if defined(arch_atomic64_dec_return_release)
34081d78814dSMark Rutland 	return arch_atomic64_dec_return_release(v);
34091d78814dSMark Rutland #elif defined(arch_atomic64_dec_return_relaxed)
3410e3d18ceeSMark Rutland 	__atomic_release_fence();
34119257959aSMark Rutland 	return arch_atomic64_dec_return_relaxed(v);
34129257959aSMark Rutland #elif defined(arch_atomic64_dec_return)
34131d78814dSMark Rutland 	return arch_atomic64_dec_return(v);
34149257959aSMark Rutland #else
34159257959aSMark Rutland 	return raw_atomic64_sub_return_release(1, v);
3416e3d18ceeSMark Rutland #endif
34171d78814dSMark Rutland }
3418e3d18ceeSMark Rutland 
3419ad811070SMark Rutland /**
3420ad811070SMark Rutland  * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
3421ad811070SMark Rutland  * @v: pointer to atomic64_t
3422ad811070SMark Rutland  *
3423ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with relaxed ordering.
3424ad811070SMark Rutland  *
3425ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
3426ad811070SMark Rutland  *
3427ad811070SMark Rutland  * Return: The updated value of @v.
3428ad811070SMark Rutland  */
3429e3d18ceeSMark Rutland static __always_inline s64
34309257959aSMark Rutland raw_atomic64_dec_return_relaxed(atomic64_t *v)
34319257959aSMark Rutland {
34321d78814dSMark Rutland #if defined(arch_atomic64_dec_return_relaxed)
34331d78814dSMark Rutland 	return arch_atomic64_dec_return_relaxed(v);
34341d78814dSMark Rutland #elif defined(arch_atomic64_dec_return)
34351d78814dSMark Rutland 	return arch_atomic64_dec_return(v);
34361d78814dSMark Rutland #else
34379257959aSMark Rutland 	return raw_atomic64_sub_return_relaxed(1, v);
34389257959aSMark Rutland #endif
34391d78814dSMark Rutland }
34409257959aSMark Rutland 
3441ad811070SMark Rutland /**
3442ad811070SMark Rutland  * raw_atomic64_fetch_dec() - atomic decrement with full ordering
3443ad811070SMark Rutland  * @v: pointer to atomic64_t
3444ad811070SMark Rutland  *
3445ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with full ordering.
3446ad811070SMark Rutland  *
3447ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
3448ad811070SMark Rutland  *
3449ad811070SMark Rutland  * Return: The original value of @v.
3450ad811070SMark Rutland  */
34519257959aSMark Rutland static __always_inline s64
34529257959aSMark Rutland raw_atomic64_fetch_dec(atomic64_t *v)
3453e3d18ceeSMark Rutland {
34541d78814dSMark Rutland #if defined(arch_atomic64_fetch_dec)
34551d78814dSMark Rutland 	return arch_atomic64_fetch_dec(v);
34561d78814dSMark Rutland #elif defined(arch_atomic64_fetch_dec_relaxed)
3457e3d18ceeSMark Rutland 	s64 ret;
3458e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3459e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_dec_relaxed(v);
3460e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3461e3d18ceeSMark Rutland 	return ret;
34629257959aSMark Rutland #else
34639257959aSMark Rutland 	return raw_atomic64_fetch_sub(1, v);
3464e3d18ceeSMark Rutland #endif
34651d78814dSMark Rutland }
3466e3d18ceeSMark Rutland 
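/*
 * Illustrative sketch only: fetch_dec returns the old value, so reaching
 * zero is detected by comparing against 1 in this hypothetical helper.
 *
 *	static noinstr bool example_fetch_put(atomic64_t *count)
 *	{
 *		return raw_atomic64_fetch_dec(count) == 1;
 *	}
 */
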
3467ad811070SMark Rutland /**
3468ad811070SMark Rutland  * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
3469ad811070SMark Rutland  * @v: pointer to atomic64_t
3470ad811070SMark Rutland  *
3471ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with acquire ordering.
3472ad811070SMark Rutland  *
3473ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
3474ad811070SMark Rutland  *
3475ad811070SMark Rutland  * Return: The original value of @v.
3476ad811070SMark Rutland  */
3477e3d18ceeSMark Rutland static __always_inline s64
34789257959aSMark Rutland raw_atomic64_fetch_dec_acquire(atomic64_t *v)
3479e3d18ceeSMark Rutland {
34801d78814dSMark Rutland #if defined(arch_atomic64_fetch_dec_acquire)
34811d78814dSMark Rutland 	return arch_atomic64_fetch_dec_acquire(v);
34821d78814dSMark Rutland #elif defined(arch_atomic64_fetch_dec_relaxed)
34839257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
3484e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3485e3d18ceeSMark Rutland 	return ret;
34869257959aSMark Rutland #elif defined(arch_atomic64_fetch_dec)
34871d78814dSMark Rutland 	return arch_atomic64_fetch_dec(v);
34889257959aSMark Rutland #else
34899257959aSMark Rutland 	return raw_atomic64_fetch_sub_acquire(1, v);
3490e3d18ceeSMark Rutland #endif
34911d78814dSMark Rutland }
3492e3d18ceeSMark Rutland 
3493ad811070SMark Rutland /**
3494ad811070SMark Rutland  * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
3495ad811070SMark Rutland  * @v: pointer to atomic64_t
3496ad811070SMark Rutland  *
3497ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with release ordering.
3498ad811070SMark Rutland  *
3499ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
3500ad811070SMark Rutland  *
3501ad811070SMark Rutland  * Return: The original value of @v.
3502ad811070SMark Rutland  */
3503e3d18ceeSMark Rutland static __always_inline s64
35049257959aSMark Rutland raw_atomic64_fetch_dec_release(atomic64_t *v)
3505e3d18ceeSMark Rutland {
35061d78814dSMark Rutland #if defined(arch_atomic64_fetch_dec_release)
35071d78814dSMark Rutland 	return arch_atomic64_fetch_dec_release(v);
35081d78814dSMark Rutland #elif defined(arch_atomic64_fetch_dec_relaxed)
3509e3d18ceeSMark Rutland 	__atomic_release_fence();
35109257959aSMark Rutland 	return arch_atomic64_fetch_dec_relaxed(v);
35119257959aSMark Rutland #elif defined(arch_atomic64_fetch_dec)
35121d78814dSMark Rutland 	return arch_atomic64_fetch_dec(v);
35139257959aSMark Rutland #else
35149257959aSMark Rutland 	return raw_atomic64_fetch_sub_release(1, v);
3515e3d18ceeSMark Rutland #endif
35161d78814dSMark Rutland }
3517e3d18ceeSMark Rutland 
3518ad811070SMark Rutland /**
3519ad811070SMark Rutland  * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
3520ad811070SMark Rutland  * @v: pointer to atomic64_t
3521ad811070SMark Rutland  *
3522ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with relaxed ordering.
3523ad811070SMark Rutland  *
3524ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
3525ad811070SMark Rutland  *
3526ad811070SMark Rutland  * Return: The original value of @v.
3527ad811070SMark Rutland  */
3528e3d18ceeSMark Rutland static __always_inline s64
35299257959aSMark Rutland raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
35309257959aSMark Rutland {
35311d78814dSMark Rutland #if defined(arch_atomic64_fetch_dec_relaxed)
35321d78814dSMark Rutland 	return arch_atomic64_fetch_dec_relaxed(v);
35331d78814dSMark Rutland #elif defined(arch_atomic64_fetch_dec)
35341d78814dSMark Rutland 	return arch_atomic64_fetch_dec(v);
35351d78814dSMark Rutland #else
35369257959aSMark Rutland 	return raw_atomic64_fetch_sub_relaxed(1, v);
35379257959aSMark Rutland #endif
35381d78814dSMark Rutland }
35399257959aSMark Rutland 
3540ad811070SMark Rutland /**
3541ad811070SMark Rutland  * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
3542ad811070SMark Rutland  * @i: s64 value
3543ad811070SMark Rutland  * @v: pointer to atomic64_t
3544ad811070SMark Rutland  *
3545ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with relaxed ordering.
3546ad811070SMark Rutland  *
3547ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
3548ad811070SMark Rutland  *
3549ad811070SMark Rutland  * Return: Nothing.
3550ad811070SMark Rutland  */
35511d78814dSMark Rutland static __always_inline void
35521d78814dSMark Rutland raw_atomic64_and(s64 i, atomic64_t *v)
35531d78814dSMark Rutland {
35541d78814dSMark Rutland 	arch_atomic64_and(i, v);
35551d78814dSMark Rutland }
35569257959aSMark Rutland 
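/*
 * Illustrative sketch only: a hypothetical helper that atomically keeps
 * just the bits in @mask, using the relaxed raw AND above.
 *
 *	static noinstr void example_keep_flags(atomic64_t *flags, s64 mask)
 *	{
 *		raw_atomic64_and(mask, flags);
 *	}
 */
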
3557ad811070SMark Rutland /**
3558ad811070SMark Rutland  * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
3559ad811070SMark Rutland  * @i: s64 value
3560ad811070SMark Rutland  * @v: pointer to atomic64_t
3561ad811070SMark Rutland  *
3562ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with full ordering.
3563ad811070SMark Rutland  *
3564ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
3565ad811070SMark Rutland  *
3566ad811070SMark Rutland  * Return: The original value of @v.
3567ad811070SMark Rutland  */
35689257959aSMark Rutland static __always_inline s64
35699257959aSMark Rutland raw_atomic64_fetch_and(s64 i, atomic64_t *v)
3570e3d18ceeSMark Rutland {
35711d78814dSMark Rutland #if defined(arch_atomic64_fetch_and)
35721d78814dSMark Rutland 	return arch_atomic64_fetch_and(i, v);
35731d78814dSMark Rutland #elif defined(arch_atomic64_fetch_and_relaxed)
3574e3d18ceeSMark Rutland 	s64 ret;
3575e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3576e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_and_relaxed(i, v);
3577e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3578e3d18ceeSMark Rutland 	return ret;
35799257959aSMark Rutland #else
35809257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_and"
3581e3d18ceeSMark Rutland #endif
35821d78814dSMark Rutland }
3583e3d18ceeSMark Rutland 
3584ad811070SMark Rutland /**
3585ad811070SMark Rutland  * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
3586ad811070SMark Rutland  * @i: s64 value
3587ad811070SMark Rutland  * @v: pointer to atomic64_t
3588ad811070SMark Rutland  *
3589ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with acquire ordering.
3590ad811070SMark Rutland  *
3591ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
3592ad811070SMark Rutland  *
3593ad811070SMark Rutland  * Return: The original value of @v.
3594ad811070SMark Rutland  */
3595e3d18ceeSMark Rutland static __always_inline s64
35969257959aSMark Rutland raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
3597e3d18ceeSMark Rutland {
35981d78814dSMark Rutland #if defined(arch_atomic64_fetch_and_acquire)
35991d78814dSMark Rutland 	return arch_atomic64_fetch_and_acquire(i, v);
36001d78814dSMark Rutland #elif defined(arch_atomic64_fetch_and_relaxed)
36019257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3602e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3603e3d18ceeSMark Rutland 	return ret;
36049257959aSMark Rutland #elif defined(arch_atomic64_fetch_and)
36051d78814dSMark Rutland 	return arch_atomic64_fetch_and(i, v);
36069257959aSMark Rutland #else
36079257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_and_acquire"
3608e3d18ceeSMark Rutland #endif
36091d78814dSMark Rutland }
3610e3d18ceeSMark Rutland 
3611ad811070SMark Rutland /**
3612ad811070SMark Rutland  * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
3613ad811070SMark Rutland  * @i: s64 value
3614ad811070SMark Rutland  * @v: pointer to atomic64_t
3615ad811070SMark Rutland  *
3616ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with release ordering.
3617ad811070SMark Rutland  *
3618ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
3619ad811070SMark Rutland  *
3620ad811070SMark Rutland  * Return: The original value of @v.
3621ad811070SMark Rutland  */
3622e3d18ceeSMark Rutland static __always_inline s64
36239257959aSMark Rutland raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
3624e3d18ceeSMark Rutland {
36251d78814dSMark Rutland #if defined(arch_atomic64_fetch_and_release)
36261d78814dSMark Rutland 	return arch_atomic64_fetch_and_release(i, v);
36271d78814dSMark Rutland #elif defined(arch_atomic64_fetch_and_relaxed)
3628e3d18ceeSMark Rutland 	__atomic_release_fence();
36299257959aSMark Rutland 	return arch_atomic64_fetch_and_relaxed(i, v);
36309257959aSMark Rutland #elif defined(arch_atomic64_fetch_and)
36311d78814dSMark Rutland 	return arch_atomic64_fetch_and(i, v);
36329257959aSMark Rutland #else
36339257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_and_release"
3634e3d18ceeSMark Rutland #endif
36351d78814dSMark Rutland }
3636e3d18ceeSMark Rutland 
3637ad811070SMark Rutland /**
3638ad811070SMark Rutland  * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
3639ad811070SMark Rutland  * @i: s64 value
3640ad811070SMark Rutland  * @v: pointer to atomic64_t
3641ad811070SMark Rutland  *
3642ad811070SMark Rutland  * Atomically updates @v to (@v & @i) with relaxed ordering.
3643ad811070SMark Rutland  *
3644ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
3645ad811070SMark Rutland  *
3646ad811070SMark Rutland  * Return: The original value of @v.
3647ad811070SMark Rutland  */
36481d78814dSMark Rutland static __always_inline s64
36491d78814dSMark Rutland raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
36501d78814dSMark Rutland {
36519257959aSMark Rutland #if defined(arch_atomic64_fetch_and_relaxed)
36521d78814dSMark Rutland 	return arch_atomic64_fetch_and_relaxed(i, v);
36539257959aSMark Rutland #elif defined(arch_atomic64_fetch_and)
36541d78814dSMark Rutland 	return arch_atomic64_fetch_and(i, v);
36559257959aSMark Rutland #else
36569257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_and_relaxed"
36579257959aSMark Rutland #endif
36581d78814dSMark Rutland }
36599257959aSMark Rutland 
3660ad811070SMark Rutland /**
3661ad811070SMark Rutland  * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
3662ad811070SMark Rutland  * @i: s64 value
3663ad811070SMark Rutland  * @v: pointer to atomic64_t
3664ad811070SMark Rutland  *
3665ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3666ad811070SMark Rutland  *
3667ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
3668ad811070SMark Rutland  *
3669ad811070SMark Rutland  * Return: Nothing.
3670ad811070SMark Rutland  */
36719257959aSMark Rutland static __always_inline void
36729257959aSMark Rutland raw_atomic64_andnot(s64 i, atomic64_t *v)
36739257959aSMark Rutland {
36741d78814dSMark Rutland #if defined(arch_atomic64_andnot)
36751d78814dSMark Rutland 	arch_atomic64_andnot(i, v);
36761d78814dSMark Rutland #else
36779257959aSMark Rutland 	raw_atomic64_and(~i, v);
36789257959aSMark Rutland #endif
36791d78814dSMark Rutland }
36809257959aSMark Rutland 
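/*
 * Illustrative sketch only: clearing bits with the raw AND NOT above. On
 * an architecture without arch_atomic64_andnot() this is exactly
 * raw_atomic64_and(~mask, flags); the helper name is hypothetical.
 *
 *	static noinstr void example_clear_flags(atomic64_t *flags, s64 mask)
 *	{
 *		raw_atomic64_andnot(mask, flags);
 *	}
 */
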
3681ad811070SMark Rutland /**
3682ad811070SMark Rutland  * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
3683ad811070SMark Rutland  * @i: s64 value
3684ad811070SMark Rutland  * @v: pointer to atomic64_t
3685ad811070SMark Rutland  *
3686ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with full ordering.
3687ad811070SMark Rutland  *
3688ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
3689ad811070SMark Rutland  *
3690ad811070SMark Rutland  * Return: The original value of @v.
3691ad811070SMark Rutland  */
3692e3d18ceeSMark Rutland static __always_inline s64
36939257959aSMark Rutland raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3694e3d18ceeSMark Rutland {
36951d78814dSMark Rutland #if defined(arch_atomic64_fetch_andnot)
36961d78814dSMark Rutland 	return arch_atomic64_fetch_andnot(i, v);
36971d78814dSMark Rutland #elif defined(arch_atomic64_fetch_andnot_relaxed)
3698e3d18ceeSMark Rutland 	s64 ret;
3699e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3700e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3701e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3702e3d18ceeSMark Rutland 	return ret;
37039257959aSMark Rutland #else
37049257959aSMark Rutland 	return raw_atomic64_fetch_and(~i, v);
3705e3d18ceeSMark Rutland #endif
37061d78814dSMark Rutland }
3707e3d18ceeSMark Rutland 
3708ad811070SMark Rutland /**
3709ad811070SMark Rutland  * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
3710ad811070SMark Rutland  * @i: s64 value
3711ad811070SMark Rutland  * @v: pointer to atomic64_t
3712ad811070SMark Rutland  *
3713ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with acquire ordering.
3714ad811070SMark Rutland  *
3715ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
3716ad811070SMark Rutland  *
3717ad811070SMark Rutland  * Return: The original value of @v.
3718ad811070SMark Rutland  */
3719e3d18ceeSMark Rutland static __always_inline s64
37209257959aSMark Rutland raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
3721e3d18ceeSMark Rutland {
37221d78814dSMark Rutland #if defined(arch_atomic64_fetch_andnot_acquire)
37231d78814dSMark Rutland 	return arch_atomic64_fetch_andnot_acquire(i, v);
37241d78814dSMark Rutland #elif defined(arch_atomic64_fetch_andnot_relaxed)
37259257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3726e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3727e3d18ceeSMark Rutland 	return ret;
37289257959aSMark Rutland #elif defined(arch_atomic64_fetch_andnot)
37291d78814dSMark Rutland 	return arch_atomic64_fetch_andnot(i, v);
37309257959aSMark Rutland #else
37319257959aSMark Rutland 	return raw_atomic64_fetch_and_acquire(~i, v);
3732e3d18ceeSMark Rutland #endif
37331d78814dSMark Rutland }
3734e3d18ceeSMark Rutland 
3735ad811070SMark Rutland /**
3736ad811070SMark Rutland  * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
3737ad811070SMark Rutland  * @i: s64 value
3738ad811070SMark Rutland  * @v: pointer to atomic64_t
3739ad811070SMark Rutland  *
3740ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with release ordering.
3741ad811070SMark Rutland  *
3742ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
3743ad811070SMark Rutland  *
3744ad811070SMark Rutland  * Return: The original value of @v.
3745ad811070SMark Rutland  */
3746e3d18ceeSMark Rutland static __always_inline s64
37479257959aSMark Rutland raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3748e3d18ceeSMark Rutland {
37491d78814dSMark Rutland #if defined(arch_atomic64_fetch_andnot_release)
37501d78814dSMark Rutland 	return arch_atomic64_fetch_andnot_release(i, v);
37511d78814dSMark Rutland #elif defined(arch_atomic64_fetch_andnot_relaxed)
3752e3d18ceeSMark Rutland 	__atomic_release_fence();
37539257959aSMark Rutland 	return arch_atomic64_fetch_andnot_relaxed(i, v);
37549257959aSMark Rutland #elif defined(arch_atomic64_fetch_andnot)
37551d78814dSMark Rutland 	return arch_atomic64_fetch_andnot(i, v);
37569257959aSMark Rutland #else
37579257959aSMark Rutland 	return raw_atomic64_fetch_and_release(~i, v);
3758e3d18ceeSMark Rutland #endif
37591d78814dSMark Rutland }
3760e3d18ceeSMark Rutland 
3761ad811070SMark Rutland /**
3762ad811070SMark Rutland  * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
3763ad811070SMark Rutland  * @i: s64 value
3764ad811070SMark Rutland  * @v: pointer to atomic64_t
3765ad811070SMark Rutland  *
3766ad811070SMark Rutland  * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3767ad811070SMark Rutland  *
3768ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
3769ad811070SMark Rutland  *
3770ad811070SMark Rutland  * Return: The original value of @v.
3771ad811070SMark Rutland  */
3772e3d18ceeSMark Rutland static __always_inline s64
37739257959aSMark Rutland raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
37749257959aSMark Rutland {
37751d78814dSMark Rutland #if defined(arch_atomic64_fetch_andnot_relaxed)
37761d78814dSMark Rutland 	return arch_atomic64_fetch_andnot_relaxed(i, v);
37771d78814dSMark Rutland #elif defined(arch_atomic64_fetch_andnot)
37781d78814dSMark Rutland 	return arch_atomic64_fetch_andnot(i, v);
37791d78814dSMark Rutland #else
37809257959aSMark Rutland 	return raw_atomic64_fetch_and_relaxed(~i, v);
37819257959aSMark Rutland #endif
37821d78814dSMark Rutland }
37839257959aSMark Rutland 
3784ad811070SMark Rutland /**
3785ad811070SMark Rutland  * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
3786ad811070SMark Rutland  * @i: s64 value
3787ad811070SMark Rutland  * @v: pointer to atomic64_t
3788ad811070SMark Rutland  *
3789ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with relaxed ordering.
3790ad811070SMark Rutland  *
3791ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
3792ad811070SMark Rutland  *
3793ad811070SMark Rutland  * Return: Nothing.
3794ad811070SMark Rutland  */
37951d78814dSMark Rutland static __always_inline void
37961d78814dSMark Rutland raw_atomic64_or(s64 i, atomic64_t *v)
37971d78814dSMark Rutland {
37981d78814dSMark Rutland 	arch_atomic64_or(i, v);
37991d78814dSMark Rutland }
38009257959aSMark Rutland 
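/*
 * Illustrative sketch only: a hypothetical helper that atomically sets
 * bits with the relaxed raw OR above.
 *
 *	static noinstr void example_set_flags(atomic64_t *flags, s64 mask)
 *	{
 *		raw_atomic64_or(mask, flags);
 *	}
 */
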
3801ad811070SMark Rutland /**
3802ad811070SMark Rutland  * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
3803ad811070SMark Rutland  * @i: s64 value
3804ad811070SMark Rutland  * @v: pointer to atomic64_t
3805ad811070SMark Rutland  *
3806ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with full ordering.
3807ad811070SMark Rutland  *
3808ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
3809ad811070SMark Rutland  *
3810ad811070SMark Rutland  * Return: The original value of @v.
3811ad811070SMark Rutland  */
38129257959aSMark Rutland static __always_inline s64
38139257959aSMark Rutland raw_atomic64_fetch_or(s64 i, atomic64_t *v)
3814e3d18ceeSMark Rutland {
38151d78814dSMark Rutland #if defined(arch_atomic64_fetch_or)
38161d78814dSMark Rutland 	return arch_atomic64_fetch_or(i, v);
38171d78814dSMark Rutland #elif defined(arch_atomic64_fetch_or_relaxed)
3818e3d18ceeSMark Rutland 	s64 ret;
3819e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3820e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_or_relaxed(i, v);
3821e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3822e3d18ceeSMark Rutland 	return ret;
38239257959aSMark Rutland #else
38249257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_or"
3825e3d18ceeSMark Rutland #endif
38261d78814dSMark Rutland }
3827e3d18ceeSMark Rutland 
3828ad811070SMark Rutland /**
3829ad811070SMark Rutland  * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
3830ad811070SMark Rutland  * @i: s64 value
3831ad811070SMark Rutland  * @v: pointer to atomic64_t
3832ad811070SMark Rutland  *
3833ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with acquire ordering.
3834ad811070SMark Rutland  *
3835ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
3836ad811070SMark Rutland  *
3837ad811070SMark Rutland  * Return: The original value of @v.
3838ad811070SMark Rutland  */
3839e3d18ceeSMark Rutland static __always_inline s64
38409257959aSMark Rutland raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
3841e3d18ceeSMark Rutland {
38421d78814dSMark Rutland #if defined(arch_atomic64_fetch_or_acquire)
38431d78814dSMark Rutland 	return arch_atomic64_fetch_or_acquire(i, v);
38441d78814dSMark Rutland #elif defined(arch_atomic64_fetch_or_relaxed)
38459257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3846e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3847e3d18ceeSMark Rutland 	return ret;
38489257959aSMark Rutland #elif defined(arch_atomic64_fetch_or)
38491d78814dSMark Rutland 	return arch_atomic64_fetch_or(i, v);
38509257959aSMark Rutland #else
38519257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_or_acquire"
3852e3d18ceeSMark Rutland #endif
38531d78814dSMark Rutland }
3854e3d18ceeSMark Rutland 
3855ad811070SMark Rutland /**
3856ad811070SMark Rutland  * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
3857ad811070SMark Rutland  * @i: s64 value
3858ad811070SMark Rutland  * @v: pointer to atomic64_t
3859ad811070SMark Rutland  *
3860ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with release ordering.
3861ad811070SMark Rutland  *
3862ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
3863ad811070SMark Rutland  *
3864ad811070SMark Rutland  * Return: The original value of @v.
3865ad811070SMark Rutland  */
3866e3d18ceeSMark Rutland static __always_inline s64
38679257959aSMark Rutland raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
3868e3d18ceeSMark Rutland {
38691d78814dSMark Rutland #if defined(arch_atomic64_fetch_or_release)
38701d78814dSMark Rutland 	return arch_atomic64_fetch_or_release(i, v);
38711d78814dSMark Rutland #elif defined(arch_atomic64_fetch_or_relaxed)
3872e3d18ceeSMark Rutland 	__atomic_release_fence();
38739257959aSMark Rutland 	return arch_atomic64_fetch_or_relaxed(i, v);
38749257959aSMark Rutland #elif defined(arch_atomic64_fetch_or)
38751d78814dSMark Rutland 	return arch_atomic64_fetch_or(i, v);
38769257959aSMark Rutland #else
38779257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_or_release"
3878e3d18ceeSMark Rutland #endif
38791d78814dSMark Rutland }
3880e3d18ceeSMark Rutland 
3881ad811070SMark Rutland /**
3882ad811070SMark Rutland  * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
3883ad811070SMark Rutland  * @i: s64 value
3884ad811070SMark Rutland  * @v: pointer to atomic64_t
3885ad811070SMark Rutland  *
3886ad811070SMark Rutland  * Atomically updates @v to (@v | @i) with relaxed ordering.
3887ad811070SMark Rutland  *
3888ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
3889ad811070SMark Rutland  *
3890ad811070SMark Rutland  * Return: The original value of @v.
3891ad811070SMark Rutland  */
38921d78814dSMark Rutland static __always_inline s64
38931d78814dSMark Rutland raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
38941d78814dSMark Rutland {
38959257959aSMark Rutland #if defined(arch_atomic64_fetch_or_relaxed)
38961d78814dSMark Rutland 	return arch_atomic64_fetch_or_relaxed(i, v);
38979257959aSMark Rutland #elif defined(arch_atomic64_fetch_or)
38981d78814dSMark Rutland 	return arch_atomic64_fetch_or(i, v);
38999257959aSMark Rutland #else
39009257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_or_relaxed"
39019257959aSMark Rutland #endif
39021d78814dSMark Rutland }
39039257959aSMark Rutland 
3904ad811070SMark Rutland /**
3905ad811070SMark Rutland  * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
3906ad811070SMark Rutland  * @i: s64 value
3907ad811070SMark Rutland  * @v: pointer to atomic64_t
3908ad811070SMark Rutland  *
3909ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with relaxed ordering.
3910ad811070SMark Rutland  *
3911ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
3912ad811070SMark Rutland  *
3913ad811070SMark Rutland  * Return: Nothing.
3914ad811070SMark Rutland  */
39151d78814dSMark Rutland static __always_inline void
39161d78814dSMark Rutland raw_atomic64_xor(s64 i, atomic64_t *v)
39171d78814dSMark Rutland {
39181d78814dSMark Rutland 	arch_atomic64_xor(i, v);
39191d78814dSMark Rutland }
39209257959aSMark Rutland 
3921ad811070SMark Rutland /**
3922ad811070SMark Rutland  * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
3923ad811070SMark Rutland  * @i: s64 value
3924ad811070SMark Rutland  * @v: pointer to atomic64_t
3925ad811070SMark Rutland  *
3926ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with full ordering.
3927ad811070SMark Rutland  *
3928ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
3929ad811070SMark Rutland  *
3930ad811070SMark Rutland  * Return: The original value of @v.
3931ad811070SMark Rutland  */
3932e3d18ceeSMark Rutland static __always_inline s64
39339257959aSMark Rutland raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
3934e3d18ceeSMark Rutland {
39351d78814dSMark Rutland #if defined(arch_atomic64_fetch_xor)
39361d78814dSMark Rutland 	return arch_atomic64_fetch_xor(i, v);
39371d78814dSMark Rutland #elif defined(arch_atomic64_fetch_xor_relaxed)
3938e3d18ceeSMark Rutland 	s64 ret;
3939e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
3940e3d18ceeSMark Rutland 	ret = arch_atomic64_fetch_xor_relaxed(i, v);
3941e3d18ceeSMark Rutland 	__atomic_post_full_fence();
3942e3d18ceeSMark Rutland 	return ret;
39439257959aSMark Rutland #else
39449257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_xor"
3945e3d18ceeSMark Rutland #endif
39461d78814dSMark Rutland }
3947e3d18ceeSMark Rutland 
3948ad811070SMark Rutland /**
3949ad811070SMark Rutland  * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
3950ad811070SMark Rutland  * @i: s64 value
3951ad811070SMark Rutland  * @v: pointer to atomic64_t
3952ad811070SMark Rutland  *
3953ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with acquire ordering.
3954ad811070SMark Rutland  *
3955ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
3956ad811070SMark Rutland  *
3957ad811070SMark Rutland  * Return: The original value of @v.
3958ad811070SMark Rutland  */
3959d12157efSMark Rutland static __always_inline s64
39609257959aSMark Rutland raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
3961d12157efSMark Rutland {
39621d78814dSMark Rutland #if defined(arch_atomic64_fetch_xor_acquire)
39631d78814dSMark Rutland 	return arch_atomic64_fetch_xor_acquire(i, v);
39641d78814dSMark Rutland #elif defined(arch_atomic64_fetch_xor_relaxed)
39659257959aSMark Rutland 	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3966e3d18ceeSMark Rutland 	__atomic_acquire_fence();
3967e3d18ceeSMark Rutland 	return ret;
39689257959aSMark Rutland #elif defined(arch_atomic64_fetch_xor)
39691d78814dSMark Rutland 	return arch_atomic64_fetch_xor(i, v);
39709257959aSMark Rutland #else
39719257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_xor_acquire"
3972e3d18ceeSMark Rutland #endif
39731d78814dSMark Rutland }
3974e3d18ceeSMark Rutland 
3975ad811070SMark Rutland /**
3976ad811070SMark Rutland  * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
3977ad811070SMark Rutland  * @i: s64 value
3978ad811070SMark Rutland  * @v: pointer to atomic64_t
3979ad811070SMark Rutland  *
3980ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with release ordering.
3981ad811070SMark Rutland  *
3982ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
3983ad811070SMark Rutland  *
3984ad811070SMark Rutland  * Return: The original value of @v.
3985ad811070SMark Rutland  */
3986e3d18ceeSMark Rutland static __always_inline s64
39879257959aSMark Rutland raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
3988e3d18ceeSMark Rutland {
39891d78814dSMark Rutland #if defined(arch_atomic64_fetch_xor_release)
39901d78814dSMark Rutland 	return arch_atomic64_fetch_xor_release(i, v);
39911d78814dSMark Rutland #elif defined(arch_atomic64_fetch_xor_relaxed)
3992e3d18ceeSMark Rutland 	__atomic_release_fence();
39939257959aSMark Rutland 	return arch_atomic64_fetch_xor_relaxed(i, v);
39949257959aSMark Rutland #elif defined(arch_atomic64_fetch_xor)
39951d78814dSMark Rutland 	return arch_atomic64_fetch_xor(i, v);
39969257959aSMark Rutland #else
39979257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_xor_release"
3998e3d18ceeSMark Rutland #endif
39991d78814dSMark Rutland }
4000e3d18ceeSMark Rutland 
4001ad811070SMark Rutland /**
4002ad811070SMark Rutland  * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
4003ad811070SMark Rutland  * @i: s64 value
4004ad811070SMark Rutland  * @v: pointer to atomic64_t
4005ad811070SMark Rutland  *
4006ad811070SMark Rutland  * Atomically updates @v to (@v ^ @i) with relaxed ordering.
4007ad811070SMark Rutland  *
4008ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
4009ad811070SMark Rutland  *
4010ad811070SMark Rutland  * Return: The original value of @v.
4011ad811070SMark Rutland  */
40121d78814dSMark Rutland static __always_inline s64
40131d78814dSMark Rutland raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
40141d78814dSMark Rutland {
40159257959aSMark Rutland #if defined(arch_atomic64_fetch_xor_relaxed)
40161d78814dSMark Rutland 	return arch_atomic64_fetch_xor_relaxed(i, v);
40179257959aSMark Rutland #elif defined(arch_atomic64_fetch_xor)
40181d78814dSMark Rutland 	return arch_atomic64_fetch_xor(i, v);
40199257959aSMark Rutland #else
40209257959aSMark Rutland #error "Unable to define raw_atomic64_fetch_xor_relaxed"
40219257959aSMark Rutland #endif
4022e3d18ceeSMark Rutland }
40231d78814dSMark Rutland 
4024ad811070SMark Rutland /**
4025ad811070SMark Rutland  * raw_atomic64_xchg() - atomic exchange with full ordering
4026ad811070SMark Rutland  * @v: pointer to atomic64_t
4027ad811070SMark Rutland  * @new: s64 value to assign
4028ad811070SMark Rutland  *
4029ad811070SMark Rutland  * Atomically updates @v to @new with full ordering.
4030ad811070SMark Rutland  *
4031ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
4032ad811070SMark Rutland  *
4033ad811070SMark Rutland  * Return: The original value of @v.
4034ad811070SMark Rutland  */
4035d12157efSMark Rutland static __always_inline s64
40369257959aSMark Rutland raw_atomic64_xchg(atomic64_t *v, s64 new)
4037d12157efSMark Rutland {
40381d78814dSMark Rutland #if defined(arch_atomic64_xchg)
40391d78814dSMark Rutland 	return arch_atomic64_xchg(v, new);
40409257959aSMark Rutland #elif defined(arch_atomic64_xchg_relaxed)
40411d78814dSMark Rutland 	s64 ret;
40421d78814dSMark Rutland 	__atomic_pre_full_fence();
40431d78814dSMark Rutland 	ret = arch_atomic64_xchg_relaxed(v, new);
40441d78814dSMark Rutland 	__atomic_post_full_fence();
4045e3d18ceeSMark Rutland 	return ret;
40469257959aSMark Rutland #else
40471d78814dSMark Rutland 	return raw_xchg(&v->counter, new);
40481d78814dSMark Rutland #endif
40491d78814dSMark Rutland }
40501d78814dSMark Rutland 
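/*
 * Illustrative sketch only: a hypothetical helper that snapshots pending
 * bits and clears them in a single fully ordered exchange.
 *
 *	static noinstr s64 example_take_pending(atomic64_t *pending)
 *	{
 *		return raw_atomic64_xchg(pending, 0);
 *	}
 */
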
4051ad811070SMark Rutland /**
4052ad811070SMark Rutland  * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
4053ad811070SMark Rutland  * @v: pointer to atomic64_t
4054ad811070SMark Rutland  * @new: s64 value to assign
4055ad811070SMark Rutland  *
4056ad811070SMark Rutland  * Atomically updates @v to @new with acquire ordering.
4057ad811070SMark Rutland  *
4058ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
4059ad811070SMark Rutland  *
4060ad811070SMark Rutland  * Return: The original value of @v.
4061ad811070SMark Rutland  */
40629257959aSMark Rutland static __always_inline s64
40639257959aSMark Rutland raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
40649257959aSMark Rutland {
40651d78814dSMark Rutland #if defined(arch_atomic64_xchg_acquire)
40661d78814dSMark Rutland 	return arch_atomic64_xchg_acquire(v, new);
40679257959aSMark Rutland #elif defined(arch_atomic64_xchg_relaxed)
40681d78814dSMark Rutland 	s64 ret = arch_atomic64_xchg_relaxed(v, new);
40691d78814dSMark Rutland 	__atomic_acquire_fence();
40701d78814dSMark Rutland 	return ret;
40719257959aSMark Rutland #elif defined(arch_atomic64_xchg)
40721d78814dSMark Rutland 	return arch_atomic64_xchg(v, new);
40739257959aSMark Rutland #else
40741d78814dSMark Rutland 	return raw_xchg_acquire(&v->counter, new);
40751d78814dSMark Rutland #endif
40761d78814dSMark Rutland }
40771d78814dSMark Rutland 
4078ad811070SMark Rutland /**
4079ad811070SMark Rutland  * raw_atomic64_xchg_release() - atomic exchange with release ordering
4080ad811070SMark Rutland  * @v: pointer to atomic64_t
4081ad811070SMark Rutland  * @new: s64 value to assign
4082ad811070SMark Rutland  *
4083ad811070SMark Rutland  * Atomically updates @v to @new with release ordering.
4084ad811070SMark Rutland  *
4085ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
4086ad811070SMark Rutland  *
4087ad811070SMark Rutland  * Return: The original value of @v.
4088ad811070SMark Rutland  */
40899257959aSMark Rutland static __always_inline s64
40909257959aSMark Rutland raw_atomic64_xchg_release(atomic64_t *v, s64 new)
40919257959aSMark Rutland {
40921d78814dSMark Rutland #if defined(arch_atomic64_xchg_release)
40931d78814dSMark Rutland 	return arch_atomic64_xchg_release(v, new);
40941d78814dSMark Rutland #elif defined(arch_atomic64_xchg_relaxed)
40951d78814dSMark Rutland 	__atomic_release_fence();
40961d78814dSMark Rutland 	return arch_atomic64_xchg_relaxed(v, new);
40979257959aSMark Rutland #elif defined(arch_atomic64_xchg)
40981d78814dSMark Rutland 	return arch_atomic64_xchg(v, new);
40999257959aSMark Rutland #else
41001d78814dSMark Rutland 	return raw_xchg_release(&v->counter, new);
41011d78814dSMark Rutland #endif
41021d78814dSMark Rutland }
41031d78814dSMark Rutland 
4104ad811070SMark Rutland /**
4105ad811070SMark Rutland  * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
4106ad811070SMark Rutland  * @v: pointer to atomic64_t
4107ad811070SMark Rutland  * @new: s64 value to assign
4108ad811070SMark Rutland  *
4109ad811070SMark Rutland  * Atomically updates @v to @new with relaxed ordering.
4110ad811070SMark Rutland  *
4111ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
4112ad811070SMark Rutland  *
4113ad811070SMark Rutland  * Return: The original value of @v.
4114ad811070SMark Rutland  */
4115e3d18ceeSMark Rutland static __always_inline s64
41169257959aSMark Rutland raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
41179257959aSMark Rutland {
41181d78814dSMark Rutland #if defined(arch_atomic64_xchg_relaxed)
41191d78814dSMark Rutland 	return arch_atomic64_xchg_relaxed(v, new);
41201d78814dSMark Rutland #elif defined(arch_atomic64_xchg)
41211d78814dSMark Rutland 	return arch_atomic64_xchg(v, new);
41221d78814dSMark Rutland #else
41239257959aSMark Rutland 	return raw_xchg_relaxed(&v->counter, new);
41249257959aSMark Rutland #endif
41251d78814dSMark Rutland }
41269257959aSMark Rutland 
4127ad811070SMark Rutland /**
4128ad811070SMark Rutland  * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
4129ad811070SMark Rutland  * @v: pointer to atomic64_t
4130ad811070SMark Rutland  * @old: s64 value to compare with
4131ad811070SMark Rutland  * @new: s64 value to assign
4132ad811070SMark Rutland  *
4133ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with full ordering.
41346dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4135ad811070SMark Rutland  *
4136ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
4137ad811070SMark Rutland  *
4138ad811070SMark Rutland  * Return: The original value of @v.
4139ad811070SMark Rutland  */
41409257959aSMark Rutland static __always_inline s64
41419257959aSMark Rutland raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
4142e3d18ceeSMark Rutland {
41431d78814dSMark Rutland #if defined(arch_atomic64_cmpxchg)
41441d78814dSMark Rutland 	return arch_atomic64_cmpxchg(v, old, new);
41451d78814dSMark Rutland #elif defined(arch_atomic64_cmpxchg_relaxed)
4146e3d18ceeSMark Rutland 	s64 ret;
4147e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
4148e3d18ceeSMark Rutland 	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4149e3d18ceeSMark Rutland 	__atomic_post_full_fence();
4150e3d18ceeSMark Rutland 	return ret;
41519257959aSMark Rutland #else
41529257959aSMark Rutland 	return raw_cmpxchg(&v->counter, old, new);
4153e3d18ceeSMark Rutland #endif
41541d78814dSMark Rutland }
4155e3d18ceeSMark Rutland 
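/*
 * Illustrative sketch only: a hypothetical saturating increment written as
 * a compare-and-exchange loop around the fully ordered cmpxchg above
 * (raw_atomic64_read() is defined earlier in this header).
 *
 *	static noinstr void example_inc_saturated(atomic64_t *v, s64 limit)
 *	{
 *		s64 old = raw_atomic64_read(v);
 *
 *		while (old < limit) {
 *			s64 seen = raw_atomic64_cmpxchg(v, old, old + 1);
 *
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */
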
4156ad811070SMark Rutland /**
4157ad811070SMark Rutland  * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4158ad811070SMark Rutland  * @v: pointer to atomic64_t
4159ad811070SMark Rutland  * @old: s64 value to compare with
4160ad811070SMark Rutland  * @new: s64 value to assign
4161ad811070SMark Rutland  *
4162ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with acquire ordering.
41636dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4164ad811070SMark Rutland  *
4165ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
4166ad811070SMark Rutland  *
4167ad811070SMark Rutland  * Return: The original value of @v.
4168ad811070SMark Rutland  */
41699257959aSMark Rutland static __always_inline s64
41709257959aSMark Rutland raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
4171e3d18ceeSMark Rutland {
41721d78814dSMark Rutland #if defined(arch_atomic64_cmpxchg_acquire)
41731d78814dSMark Rutland 	return arch_atomic64_cmpxchg_acquire(v, old, new);
41741d78814dSMark Rutland #elif defined(arch_atomic64_cmpxchg_relaxed)
41759257959aSMark Rutland 	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4176e3d18ceeSMark Rutland 	__atomic_acquire_fence();
4177e3d18ceeSMark Rutland 	return ret;
41789257959aSMark Rutland #elif defined(arch_atomic64_cmpxchg)
41791d78814dSMark Rutland 	return arch_atomic64_cmpxchg(v, old, new);
41809257959aSMark Rutland #else
41819257959aSMark Rutland 	return raw_cmpxchg_acquire(&v->counter, old, new);
4182e3d18ceeSMark Rutland #endif
41831d78814dSMark Rutland }
4184e3d18ceeSMark Rutland 
4185ad811070SMark Rutland /**
4186ad811070SMark Rutland  * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
4187ad811070SMark Rutland  * @v: pointer to atomic64_t
4188ad811070SMark Rutland  * @old: s64 value to compare with
4189ad811070SMark Rutland  * @new: s64 value to assign
4190ad811070SMark Rutland  *
4191ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with release ordering.
41926dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4193ad811070SMark Rutland  *
4194ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
4195ad811070SMark Rutland  *
4196ad811070SMark Rutland  * Return: The original value of @v.
4197ad811070SMark Rutland  */
41989257959aSMark Rutland static __always_inline s64
41999257959aSMark Rutland raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
4200e3d18ceeSMark Rutland {
42011d78814dSMark Rutland #if defined(arch_atomic64_cmpxchg_release)
42021d78814dSMark Rutland 	return arch_atomic64_cmpxchg_release(v, old, new);
42031d78814dSMark Rutland #elif defined(arch_atomic64_cmpxchg_relaxed)
4204e3d18ceeSMark Rutland 	__atomic_release_fence();
42059257959aSMark Rutland 	return arch_atomic64_cmpxchg_relaxed(v, old, new);
42069257959aSMark Rutland #elif defined(arch_atomic64_cmpxchg)
42071d78814dSMark Rutland 	return arch_atomic64_cmpxchg(v, old, new);
42089257959aSMark Rutland #else
42099257959aSMark Rutland 	return raw_cmpxchg_release(&v->counter, old, new);
4210e3d18ceeSMark Rutland #endif
42111d78814dSMark Rutland }
4212e3d18ceeSMark Rutland 
4213ad811070SMark Rutland /**
4214ad811070SMark Rutland  * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4215ad811070SMark Rutland  * @v: pointer to atomic64_t
4216ad811070SMark Rutland  * @old: s64 value to compare with
4217ad811070SMark Rutland  * @new: s64 value to assign
4218ad811070SMark Rutland  *
4219ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with relaxed ordering.
42206dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4221ad811070SMark Rutland  *
4222ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
4223ad811070SMark Rutland  *
4224ad811070SMark Rutland  * Return: The original value of @v.
4225ad811070SMark Rutland  */
42269257959aSMark Rutland static __always_inline s64
42279257959aSMark Rutland raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
42289257959aSMark Rutland {
42291d78814dSMark Rutland #if defined(arch_atomic64_cmpxchg_relaxed)
42301d78814dSMark Rutland 	return arch_atomic64_cmpxchg_relaxed(v, old, new);
42311d78814dSMark Rutland #elif defined(arch_atomic64_cmpxchg)
42321d78814dSMark Rutland 	return arch_atomic64_cmpxchg(v, old, new);
42331d78814dSMark Rutland #else
42349257959aSMark Rutland 	return raw_cmpxchg_relaxed(&v->counter, old, new);
42359257959aSMark Rutland #endif
42361d78814dSMark Rutland }
42379257959aSMark Rutland 
4238ad811070SMark Rutland /**
4239ad811070SMark Rutland  * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
4240ad811070SMark Rutland  * @v: pointer to atomic64_t
4241ad811070SMark Rutland  * @old: pointer to s64 value to compare with
4242ad811070SMark Rutland  * @new: s64 value to assign
4243ad811070SMark Rutland  *
4244ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with full ordering.
42456dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
42466dfee110SMark Rutland  * and relaxed ordering is provided.
4247ad811070SMark Rutland  *
4248ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
4249ad811070SMark Rutland  *
4250ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
4251ad811070SMark Rutland  */
4252e3d18ceeSMark Rutland static __always_inline bool
42539257959aSMark Rutland raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
4254e3d18ceeSMark Rutland {
42551d78814dSMark Rutland #if defined(arch_atomic64_try_cmpxchg)
42561d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg(v, old, new);
42571d78814dSMark Rutland #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4258e3d18ceeSMark Rutland 	bool ret;
4259e3d18ceeSMark Rutland 	__atomic_pre_full_fence();
4260e3d18ceeSMark Rutland 	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4261e3d18ceeSMark Rutland 	__atomic_post_full_fence();
4262e3d18ceeSMark Rutland 	return ret;
42639257959aSMark Rutland #else
42649257959aSMark Rutland 	s64 r, o = *old;
42659257959aSMark Rutland 	r = raw_atomic64_cmpxchg(v, o, new);
42669257959aSMark Rutland 	if (unlikely(r != o))
42679257959aSMark Rutland 		*old = r;
42689257959aSMark Rutland 	return likely(r == o);
4269e3d18ceeSMark Rutland #endif
42701d78814dSMark Rutland }
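
/*
 * Illustrative sketch, not part of the generated file: try_cmpxchg() is meant
 * for read-modify-write loops. On failure @old is refreshed with the current
 * value of @v, so the loop needs no extra re-read per iteration. The doubling
 * operation below is hypothetical.
 */
static __always_inline s64 example_double_counter(atomic64_t *v)
{
	s64 old = raw_atomic64_read(v);

	/* Retry with the refreshed value until the update succeeds. */
	while (!raw_atomic64_try_cmpxchg(v, &old, old * 2))
		;

	return old * 2;
}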
4271e3d18ceeSMark Rutland 
4272ad811070SMark Rutland /**
4273ad811070SMark Rutland  * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4274ad811070SMark Rutland  * @v: pointer to atomic64_t
4275ad811070SMark Rutland  * @old: pointer to s64 value to compare with
4276ad811070SMark Rutland  * @new: s64 value to assign
4277ad811070SMark Rutland  *
4278ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with acquire ordering.
42796dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
42806dfee110SMark Rutland  * and relaxed ordering is provided.
4281ad811070SMark Rutland  *
4282ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
4283ad811070SMark Rutland  *
4284ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
4285ad811070SMark Rutland  */
4286e3d18ceeSMark Rutland static __always_inline bool
42879257959aSMark Rutland raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4288e3d18ceeSMark Rutland {
42891d78814dSMark Rutland #if defined(arch_atomic64_try_cmpxchg_acquire)
42901d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
42911d78814dSMark Rutland #elif defined(arch_atomic64_try_cmpxchg_relaxed)
42929257959aSMark Rutland 	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4293e5ab9effSThomas Gleixner 	__atomic_acquire_fence();
4294e5ab9effSThomas Gleixner 	return ret;
42959257959aSMark Rutland #elif defined(arch_atomic64_try_cmpxchg)
42961d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg(v, old, new);
42979257959aSMark Rutland #else
42989257959aSMark Rutland 	s64 r, o = *old;
42999257959aSMark Rutland 	r = raw_atomic64_cmpxchg_acquire(v, o, new);
43009257959aSMark Rutland 	if (unlikely(r != o))
43019257959aSMark Rutland 		*old = r;
43029257959aSMark Rutland 	return likely(r == o);
4303e5ab9effSThomas Gleixner #endif
43041d78814dSMark Rutland }
4305e5ab9effSThomas Gleixner 
4306ad811070SMark Rutland /**
4307ad811070SMark Rutland  * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
4308ad811070SMark Rutland  * @v: pointer to atomic64_t
4309ad811070SMark Rutland  * @old: pointer to s64 value to compare with
4310ad811070SMark Rutland  * @new: s64 value to assign
4311ad811070SMark Rutland  *
4312ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with release ordering.
43136dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
43146dfee110SMark Rutland  * and relaxed ordering is provided.
4315ad811070SMark Rutland  *
4316ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
4317ad811070SMark Rutland  *
4318ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
4319ad811070SMark Rutland  */
4320e5ab9effSThomas Gleixner static __always_inline bool
43219257959aSMark Rutland raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4322e5ab9effSThomas Gleixner {
43231d78814dSMark Rutland #if defined(arch_atomic64_try_cmpxchg_release)
43241d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg_release(v, old, new);
43251d78814dSMark Rutland #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4326e5ab9effSThomas Gleixner 	__atomic_release_fence();
43279257959aSMark Rutland 	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
43289257959aSMark Rutland #elif defined(arch_atomic64_try_cmpxchg)
43291d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg(v, old, new);
43309257959aSMark Rutland #else
43319257959aSMark Rutland 	s64 r, o = *old;
43329257959aSMark Rutland 	r = raw_atomic64_cmpxchg_release(v, o, new);
43339257959aSMark Rutland 	if (unlikely(r != o))
43349257959aSMark Rutland 		*old = r;
43359257959aSMark Rutland 	return likely(r == o);
4336e5ab9effSThomas Gleixner #endif
43371d78814dSMark Rutland }
4338e5ab9effSThomas Gleixner 
4339ad811070SMark Rutland /**
4340ad811070SMark Rutland  * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4341ad811070SMark Rutland  * @v: pointer to atomic64_t
4342ad811070SMark Rutland  * @old: pointer to s64 value to compare with
4343ad811070SMark Rutland  * @new: s64 value to assign
4344ad811070SMark Rutland  *
4345ad811070SMark Rutland  * If (@v == @old), atomically updates @v to @new with relaxed ordering.
43466dfee110SMark Rutland  * Otherwise, @v is not modified, @old is updated to the current value of @v,
43476dfee110SMark Rutland  * and relaxed ordering is provided.
4348ad811070SMark Rutland  *
4349ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
4350ad811070SMark Rutland  *
4351ad811070SMark Rutland  * Return: @true if the exchange occurred, @false otherwise.
4352ad811070SMark Rutland  */
4353e5ab9effSThomas Gleixner static __always_inline bool
43549257959aSMark Rutland raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
43559257959aSMark Rutland {
43561d78814dSMark Rutland #if defined(arch_atomic64_try_cmpxchg_relaxed)
43571d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
43581d78814dSMark Rutland #elif defined(arch_atomic64_try_cmpxchg)
43591d78814dSMark Rutland 	return arch_atomic64_try_cmpxchg(v, old, new);
43601d78814dSMark Rutland #else
43619257959aSMark Rutland 	s64 r, o = *old;
43629257959aSMark Rutland 	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
43639257959aSMark Rutland 	if (unlikely(r != o))
43649257959aSMark Rutland 		*old = r;
43659257959aSMark Rutland 	return likely(r == o);
43669257959aSMark Rutland #endif
43671d78814dSMark Rutland }
43689257959aSMark Rutland 
4369ad811070SMark Rutland /**
4370ad811070SMark Rutland  * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
4371*f92a59f6SCarlos Llamas  * @i: s64 value to subtract
4372ad811070SMark Rutland  * @v: pointer to atomic64_t
4373ad811070SMark Rutland  *
4374ad811070SMark Rutland  * Atomically updates @v to (@v - @i) with full ordering.
4375ad811070SMark Rutland  *
4376ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
4377ad811070SMark Rutland  *
4378ad811070SMark Rutland  * Return: @true if the resulting value of @v is zero, @false otherwise.
4379ad811070SMark Rutland  */
43809257959aSMark Rutland static __always_inline bool
43819257959aSMark Rutland raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
43829257959aSMark Rutland {
43831d78814dSMark Rutland #if defined(arch_atomic64_sub_and_test)
43841d78814dSMark Rutland 	return arch_atomic64_sub_and_test(i, v);
43859257959aSMark Rutland #else
43861d78814dSMark Rutland 	return raw_atomic64_sub_return(i, v) == 0;
43871d78814dSMark Rutland #endif
43881d78814dSMark Rutland }
43891d78814dSMark Rutland 
4390ad811070SMark Rutland /**
4391ad811070SMark Rutland  * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
4392ad811070SMark Rutland  * @v: pointer to atomic64_t
4393ad811070SMark Rutland  *
4394ad811070SMark Rutland  * Atomically updates @v to (@v - 1) with full ordering.
4395ad811070SMark Rutland  *
4396ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
4397ad811070SMark Rutland  *
4398ad811070SMark Rutland  * Return: @true if the resulting value of @v is zero, @false otherwise.
4399ad811070SMark Rutland  */
44009257959aSMark Rutland static __always_inline bool
44019257959aSMark Rutland raw_atomic64_dec_and_test(atomic64_t *v)
44029257959aSMark Rutland {
44031d78814dSMark Rutland #if defined(arch_atomic64_dec_and_test)
44041d78814dSMark Rutland 	return arch_atomic64_dec_and_test(v);
44059257959aSMark Rutland #else
44061d78814dSMark Rutland 	return raw_atomic64_dec_return(v) == 0;
44071d78814dSMark Rutland #endif
44081d78814dSMark Rutland }
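
/*
 * Illustrative sketch, not part of the generated file: dec_and_test() is the
 * classic release side of a reference count, where only the caller that
 * drops the last reference performs cleanup. The callback below is
 * hypothetical.
 */
static __always_inline void example_put_ref(atomic64_t *refs,
					    void (*release)(void))
{
	/* Full ordering makes earlier accesses visible before release(). */
	if (raw_atomic64_dec_and_test(refs))
		release();
}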
44091d78814dSMark Rutland 
4410ad811070SMark Rutland /**
4411ad811070SMark Rutland  * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
4412ad811070SMark Rutland  * @v: pointer to atomic64_t
4413ad811070SMark Rutland  *
4414ad811070SMark Rutland  * Atomically updates @v to (@v + 1) with full ordering.
4415ad811070SMark Rutland  *
4416ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
4417ad811070SMark Rutland  *
4418ad811070SMark Rutland  * Return: @true if the resulting value of @v is zero, @false otherwise.
4419ad811070SMark Rutland  */
44209257959aSMark Rutland static __always_inline bool
44219257959aSMark Rutland raw_atomic64_inc_and_test(atomic64_t *v)
44229257959aSMark Rutland {
44231d78814dSMark Rutland #if defined(arch_atomic64_inc_and_test)
44241d78814dSMark Rutland 	return arch_atomic64_inc_and_test(v);
44251d78814dSMark Rutland #else
44269257959aSMark Rutland 	return raw_atomic64_inc_return(v) == 0;
44279257959aSMark Rutland #endif
44281d78814dSMark Rutland }
44299257959aSMark Rutland 
4430ad811070SMark Rutland /**
4431ad811070SMark Rutland  * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
4432ad811070SMark Rutland  * @i: s64 value to add
4433ad811070SMark Rutland  * @v: pointer to atomic64_t
4434ad811070SMark Rutland  *
4435ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with full ordering.
4436ad811070SMark Rutland  *
4437ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
4438ad811070SMark Rutland  *
4439ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
4440ad811070SMark Rutland  */
44419257959aSMark Rutland static __always_inline bool
44429257959aSMark Rutland raw_atomic64_add_negative(s64 i, atomic64_t *v)
4443e5ab9effSThomas Gleixner {
44441d78814dSMark Rutland #if defined(arch_atomic64_add_negative)
44451d78814dSMark Rutland 	return arch_atomic64_add_negative(i, v);
44461d78814dSMark Rutland #elif defined(arch_atomic64_add_negative_relaxed)
4447e5ab9effSThomas Gleixner 	bool ret;
4448e5ab9effSThomas Gleixner 	__atomic_pre_full_fence();
4449e5ab9effSThomas Gleixner 	ret = arch_atomic64_add_negative_relaxed(i, v);
4450e5ab9effSThomas Gleixner 	__atomic_post_full_fence();
4451e5ab9effSThomas Gleixner 	return ret;
44529257959aSMark Rutland #else
44539257959aSMark Rutland 	return raw_atomic64_add_return(i, v) < 0;
4454e5ab9effSThomas Gleixner #endif
44551d78814dSMark Rutland }
4456e5ab9effSThomas Gleixner 
4457ad811070SMark Rutland /**
4458ad811070SMark Rutland  * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
4459ad811070SMark Rutland  * @i: s64 value to add
4460ad811070SMark Rutland  * @v: pointer to atomic64_t
4461ad811070SMark Rutland  *
4462ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with acquire ordering.
4463ad811070SMark Rutland  *
4464ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
4465ad811070SMark Rutland  *
4466ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
4467ad811070SMark Rutland  */
44689257959aSMark Rutland static __always_inline bool
44699257959aSMark Rutland raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
4470e3d18ceeSMark Rutland {
44711d78814dSMark Rutland #if defined(arch_atomic64_add_negative_acquire)
44721d78814dSMark Rutland 	return arch_atomic64_add_negative_acquire(i, v);
44731d78814dSMark Rutland #elif defined(arch_atomic64_add_negative_relaxed)
44749257959aSMark Rutland 	bool ret = arch_atomic64_add_negative_relaxed(i, v);
44759257959aSMark Rutland 	__atomic_acquire_fence();
44769257959aSMark Rutland 	return ret;
44779257959aSMark Rutland #elif defined(arch_atomic64_add_negative)
44781d78814dSMark Rutland 	return arch_atomic64_add_negative(i, v);
44799257959aSMark Rutland #else
44809257959aSMark Rutland 	return raw_atomic64_add_return_acquire(i, v) < 0;
44819257959aSMark Rutland #endif
44821d78814dSMark Rutland }
44839257959aSMark Rutland 
4484ad811070SMark Rutland /**
4485ad811070SMark Rutland  * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
4486ad811070SMark Rutland  * @i: s64 value to add
4487ad811070SMark Rutland  * @v: pointer to atomic64_t
4488ad811070SMark Rutland  *
4489ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with release ordering.
4490ad811070SMark Rutland  *
4491ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
4492ad811070SMark Rutland  *
4493ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
4494ad811070SMark Rutland  */
44959257959aSMark Rutland static __always_inline bool
44969257959aSMark Rutland raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
44979257959aSMark Rutland {
44981d78814dSMark Rutland #if defined(arch_atomic64_add_negative_release)
44991d78814dSMark Rutland 	return arch_atomic64_add_negative_release(i, v);
45001d78814dSMark Rutland #elif defined(arch_atomic64_add_negative_relaxed)
45019257959aSMark Rutland 	__atomic_release_fence();
45029257959aSMark Rutland 	return arch_atomic64_add_negative_relaxed(i, v);
45039257959aSMark Rutland #elif defined(arch_atomic64_add_negative)
45041d78814dSMark Rutland 	return arch_atomic64_add_negative(i, v);
45059257959aSMark Rutland #else
45069257959aSMark Rutland 	return raw_atomic64_add_return_release(i, v) < 0;
45079257959aSMark Rutland #endif
45081d78814dSMark Rutland }
45099257959aSMark Rutland 
4510ad811070SMark Rutland /**
4511ad811070SMark Rutland  * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
4512ad811070SMark Rutland  * @i: s64 value to add
4513ad811070SMark Rutland  * @v: pointer to atomic64_t
4514ad811070SMark Rutland  *
4515ad811070SMark Rutland  * Atomically updates @v to (@v + @i) with relaxed ordering.
4516ad811070SMark Rutland  *
4517ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
4518ad811070SMark Rutland  *
4519ad811070SMark Rutland  * Return: @true if the resulting value of @v is negative, @false otherwise.
4520ad811070SMark Rutland  */
45219257959aSMark Rutland static __always_inline bool
45229257959aSMark Rutland raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
45239257959aSMark Rutland {
45241d78814dSMark Rutland #if defined(arch_atomic64_add_negative_relaxed)
45251d78814dSMark Rutland 	return arch_atomic64_add_negative_relaxed(i, v);
45261d78814dSMark Rutland #elif defined(arch_atomic64_add_negative)
45271d78814dSMark Rutland 	return arch_atomic64_add_negative(i, v);
45289257959aSMark Rutland #else
45291d78814dSMark Rutland 	return raw_atomic64_add_return_relaxed(i, v) < 0;
45301d78814dSMark Rutland #endif
45311d78814dSMark Rutland }
45321d78814dSMark Rutland 
4533ad811070SMark Rutland /**
4534ad811070SMark Rutland  * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
4535ad811070SMark Rutland  * @v: pointer to atomic64_t
4536ad811070SMark Rutland  * @a: s64 value to add
4537ad811070SMark Rutland  * @u: s64 value to compare with
4538ad811070SMark Rutland  *
4539ad811070SMark Rutland  * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
45406dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4541ad811070SMark Rutland  *
4542ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
4543ad811070SMark Rutland  *
4544ad811070SMark Rutland  * Return: The original value of @v.
4545ad811070SMark Rutland  */
45469257959aSMark Rutland static __always_inline s64
45479257959aSMark Rutland raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
45489257959aSMark Rutland {
45491d78814dSMark Rutland #if defined(arch_atomic64_fetch_add_unless)
45501d78814dSMark Rutland 	return arch_atomic64_fetch_add_unless(v, a, u);
45511d78814dSMark Rutland #else
45529257959aSMark Rutland 	s64 c = raw_atomic64_read(v);
4553e3d18ceeSMark Rutland 
4554e3d18ceeSMark Rutland 	do {
4555e3d18ceeSMark Rutland 		if (unlikely(c == u))
4556e3d18ceeSMark Rutland 			break;
45579257959aSMark Rutland 	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
4558e3d18ceeSMark Rutland 
4559e3d18ceeSMark Rutland 	return c;
4560e3d18ceeSMark Rutland #endif
45611d78814dSMark Rutland }
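
/*
 * Illustrative sketch, not part of the generated file: fetch_add_unless()
 * returns the value observed before any update, so comparing the result
 * against @u tells the caller whether the addition happened. The saturation
 * value below is hypothetical.
 */
static __always_inline bool example_inc_below(atomic64_t *v, s64 limit)
{
	/* Add 1 unless the counter currently equals @limit. */
	return raw_atomic64_fetch_add_unless(v, 1, limit) != limit;
}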
4562e3d18ceeSMark Rutland 
4563ad811070SMark Rutland /**
4564ad811070SMark Rutland  * raw_atomic64_add_unless() - atomic add unless value with full ordering
4565ad811070SMark Rutland  * @v: pointer to atomic64_t
4566ad811070SMark Rutland  * @a: s64 value to add
4567ad811070SMark Rutland  * @u: s64 value to compare with
4568ad811070SMark Rutland  *
4569ad811070SMark Rutland  * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
45706dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4571ad811070SMark Rutland  *
4572ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
4573ad811070SMark Rutland  *
4574ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
4575ad811070SMark Rutland  */
4576e3d18ceeSMark Rutland static __always_inline bool
45779257959aSMark Rutland raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
4578e3d18ceeSMark Rutland {
45791d78814dSMark Rutland #if defined(arch_atomic64_add_unless)
45801d78814dSMark Rutland 	return arch_atomic64_add_unless(v, a, u);
45819257959aSMark Rutland #else
45821d78814dSMark Rutland 	return raw_atomic64_fetch_add_unless(v, a, u) != u;
45831d78814dSMark Rutland #endif
45841d78814dSMark Rutland }
45851d78814dSMark Rutland 
4586ad811070SMark Rutland /**
4587ad811070SMark Rutland  * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
4588ad811070SMark Rutland  * @v: pointer to atomic64_t
4589ad811070SMark Rutland  *
4590ad811070SMark Rutland  * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
45916dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4592ad811070SMark Rutland  *
4593ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
4594ad811070SMark Rutland  *
4595ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
4596ad811070SMark Rutland  */
4597e3d18ceeSMark Rutland static __always_inline bool
45989257959aSMark Rutland raw_atomic64_inc_not_zero(atomic64_t *v)
4599e3d18ceeSMark Rutland {
46001d78814dSMark Rutland #if defined(arch_atomic64_inc_not_zero)
46011d78814dSMark Rutland 	return arch_atomic64_inc_not_zero(v);
46029257959aSMark Rutland #else
46031d78814dSMark Rutland 	return raw_atomic64_add_unless(v, 1, 0);
46041d78814dSMark Rutland #endif
46051d78814dSMark Rutland }
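
/*
 * Illustrative sketch, not part of the generated file: inc_not_zero() is the
 * lookup-side counterpart to dec_and_test(); it takes a new reference only
 * while at least one reference is still held, so an object whose count has
 * already hit zero is never resurrected. The helper name is hypothetical.
 */
static __always_inline bool example_get_ref(atomic64_t *refs)
{
	/* Fails once the count has dropped to zero. */
	return raw_atomic64_inc_not_zero(refs);
}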
46061d78814dSMark Rutland 
4607ad811070SMark Rutland /**
4608ad811070SMark Rutland  * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
4609ad811070SMark Rutland  * @v: pointer to atomic64_t
4610ad811070SMark Rutland  *
4611ad811070SMark Rutland  * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
46126dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4613ad811070SMark Rutland  *
4614ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
4615ad811070SMark Rutland  *
4616ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
4617ad811070SMark Rutland  */
4618e3d18ceeSMark Rutland static __always_inline bool
46199257959aSMark Rutland raw_atomic64_inc_unless_negative(atomic64_t *v)
4620e3d18ceeSMark Rutland {
46211d78814dSMark Rutland #if defined(arch_atomic64_inc_unless_negative)
46221d78814dSMark Rutland 	return arch_atomic64_inc_unless_negative(v);
46231d78814dSMark Rutland #else
46249257959aSMark Rutland 	s64 c = raw_atomic64_read(v);
4625e3d18ceeSMark Rutland 
4626e3d18ceeSMark Rutland 	do {
4627e3d18ceeSMark Rutland 		if (unlikely(c < 0))
4628e3d18ceeSMark Rutland 			return false;
46299257959aSMark Rutland 	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
4630e3d18ceeSMark Rutland 
4631e3d18ceeSMark Rutland 	return true;
4632e3d18ceeSMark Rutland #endif
46331d78814dSMark Rutland }
4634e3d18ceeSMark Rutland 
4635ad811070SMark Rutland /**
4636ad811070SMark Rutland  * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
4637ad811070SMark Rutland  * @v: pointer to atomic64_t
4638ad811070SMark Rutland  *
4639ad811070SMark Rutland  * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
46406dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4641ad811070SMark Rutland  *
4642ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
4643ad811070SMark Rutland  *
4644ad811070SMark Rutland  * Return: @true if @v was updated, @false otherwise.
4645ad811070SMark Rutland  */
4646e3d18ceeSMark Rutland static __always_inline bool
46479257959aSMark Rutland raw_atomic64_dec_unless_positive(atomic64_t *v)
4648e3d18ceeSMark Rutland {
46491d78814dSMark Rutland #if defined(arch_atomic64_dec_unless_positive)
46501d78814dSMark Rutland 	return arch_atomic64_dec_unless_positive(v);
46511d78814dSMark Rutland #else
46529257959aSMark Rutland 	s64 c = raw_atomic64_read(v);
4653e3d18ceeSMark Rutland 
4654e3d18ceeSMark Rutland 	do {
4655e3d18ceeSMark Rutland 		if (unlikely(c > 0))
4656e3d18ceeSMark Rutland 			return false;
46579257959aSMark Rutland 	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
4658e3d18ceeSMark Rutland 
4659e3d18ceeSMark Rutland 	return true;
4660e3d18ceeSMark Rutland #endif
46611d78814dSMark Rutland }
4662e3d18ceeSMark Rutland 
4663ad811070SMark Rutland /**
4664ad811070SMark Rutland  * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
4665ad811070SMark Rutland  * @v: pointer to atomic64_t
4666ad811070SMark Rutland  *
4667ad811070SMark Rutland  * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
46686dfee110SMark Rutland  * Otherwise, @v is not modified and relaxed ordering is provided.
4669ad811070SMark Rutland  *
4670ad811070SMark Rutland  * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
4671ad811070SMark Rutland  *
4672b33eb50aSMark Rutland  * Return: The old value of (@v - 1), regardless of whether @v was updated.
4673ad811070SMark Rutland  */
4674e3d18ceeSMark Rutland static __always_inline s64
46759257959aSMark Rutland raw_atomic64_dec_if_positive(atomic64_t *v)
4676e3d18ceeSMark Rutland {
46771d78814dSMark Rutland #if defined(arch_atomic64_dec_if_positive)
46781d78814dSMark Rutland 	return arch_atomic64_dec_if_positive(v);
46791d78814dSMark Rutland #else
46809257959aSMark Rutland 	s64 dec, c = raw_atomic64_read(v);
4681e3d18ceeSMark Rutland 
4682e3d18ceeSMark Rutland 	do {
4683e3d18ceeSMark Rutland 		dec = c - 1;
4684e3d18ceeSMark Rutland 		if (unlikely(dec < 0))
4685e3d18ceeSMark Rutland 			break;
46869257959aSMark Rutland 	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));
4687e3d18ceeSMark Rutland 
4688e3d18ceeSMark Rutland 	return dec;
4689e3d18ceeSMark Rutland #endif
46901d78814dSMark Rutland }
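
/*
 * Illustrative sketch, not part of the generated file: dec_if_positive()
 * consumes one unit of a counted resource only when a unit is available.
 * It returns the decremented value rather than a boolean, so a negative
 * result means nothing was taken. The helper name is hypothetical.
 */
static __always_inline bool example_take_credit(atomic64_t *credits)
{
	/* A non-negative result means the decrement actually happened. */
	return raw_atomic64_dec_if_positive(credits) >= 0;
}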
4691e3d18ceeSMark Rutland 
4692e3d18ceeSMark Rutland #endif /* _LINUX_ATOMIC_FALLBACK_H */
4693*f92a59f6SCarlos Llamas // b565db590afeeff0d7c9485ccbca5bb6e155749f
4694